Posted to commits@hive.apache.org by xu...@apache.org on 2015/07/16 06:40:48 UTC

[01/50] [abbrv] hive git commit: HIVE-11179: HIVE should allow custom conversion from HivePrivilegeObjectDesc to HivePrivilegeObject for different authorizers (Dapeng Sun, reviewed by Ferdinand Xu)

Repository: hive
Updated Branches:
  refs/heads/beeline-cli 0ac8f6c48 -> e6adedc1c


HIVE-11179: HIVE should allow custom conversion from HivePrivilegeObjectDesc to HivePrivilegeObject for different authorizers (Dapeng Sun, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/02e762f9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/02e762f9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/02e762f9

Branch: refs/heads/beeline-cli
Commit: 02e762f94762f0f2dcd71cd59a4d36f19522606e
Parents: 10dc20f
Author: Ferdinand Xu <ch...@intel.com>
Authored: Wed Jul 8 00:57:35 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Wed Jul 8 00:57:35 2015 -0400

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 10 ++++-----
 .../authorization/plugin/HiveAuthorizer.java    | 11 ++++++++++
 .../plugin/HiveAuthorizerImpl.java              | 22 ++++++++++++++++++++
 .../authorization/plugin/HiveV1Authorizer.java  | 20 ++++++++++++++++++
 4 files changed, 58 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/02e762f9/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 8bcf860..049857b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -640,7 +640,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           AuthorizationUtils.getHivePrincipalType(grantOrRevokeRoleDDL.getGrantorType()));
     }
     List<HivePrincipal> principals =
-        AuthorizationUtils.getHivePrincipals(grantOrRevokeRoleDDL.getPrincipalDesc());
+        authorizer.getHivePrincipals(grantOrRevokeRoleDDL.getPrincipalDesc());
     List<String> roles = grantOrRevokeRoleDDL.getRoles();
 
     boolean grantOption = grantOrRevokeRoleDDL.isGrantOption();
@@ -658,7 +658,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     try {
       List<HivePrivilegeInfo> privInfos = authorizer.showPrivileges(
           AuthorizationUtils.getHivePrincipal(showGrantDesc.getPrincipalDesc()),
-          AuthorizationUtils.getHivePrivilegeObject(showGrantDesc.getHiveObj()));
+          authorizer.getHivePrivilegeObject(showGrantDesc.getHiveObj()));
       boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
       writeToFile(writeGrantInfo(privInfos, testMode), showGrantDesc.getResFile());
     } catch (IOException e) {
@@ -675,9 +675,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     HiveAuthorizer authorizer = getSessionAuthorizer();
 
     //Convert to object types used by the authorization plugin interface
-    List<HivePrincipal> hivePrincipals = AuthorizationUtils.getHivePrincipals(principals);
-    List<HivePrivilege> hivePrivileges = AuthorizationUtils.getHivePrivileges(privileges);
-    HivePrivilegeObject hivePrivObject = AuthorizationUtils.getHivePrivilegeObject(privSubjectDesc);
+    List<HivePrincipal> hivePrincipals = authorizer.getHivePrincipals(principals);
+    List<HivePrivilege> hivePrivileges = authorizer.getHivePrivileges(privileges);
+    HivePrivilegeObject hivePrivObject = authorizer.getHivePrivilegeObject(privSubjectDesc);
 
     HivePrincipal grantorPrincipal = new HivePrincipal(
         grantor, AuthorizationUtils.getHivePrincipalType(grantorType));

http://git-wip-us.apache.org/repos/asf/hive/blob/02e762f9/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
index 97d9aa9..512772b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
@@ -22,6 +22,10 @@ import java.util.List;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
+import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
+import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
 
 /**
@@ -210,5 +214,12 @@ public interface HiveAuthorizer {
    */
   public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException;
 
+  public List<HivePrincipal> getHivePrincipals(List<PrincipalDesc> principals)
+      throws HiveException;
+
+  public List<HivePrivilege> getHivePrivileges(List<PrivilegeDesc> privileges);
+
+  public HivePrivilegeObject getHivePrivilegeObject(PrivilegeObjectDesc privSubjectDesc)
+      throws HiveException;
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/02e762f9/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java
index c555fbf..76a80e0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java
@@ -22,6 +22,11 @@ import java.util.List;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
+import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
+import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
+import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
 
 /**
  * Convenience implementation of HiveAuthorizer.
@@ -134,4 +139,21 @@ public class HiveAuthorizerImpl implements HiveAuthorizer {
   public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException {
     accessController.applyAuthorizationConfigPolicy(hiveConf);
   }
+
+  @Override
+  public List<HivePrincipal> getHivePrincipals(
+      List<PrincipalDesc> principals) throws HiveException {
+    return AuthorizationUtils.getHivePrincipals(principals);
+  }
+
+  @Override
+  public List<HivePrivilege> getHivePrivileges(List<PrivilegeDesc> privileges) {
+    return AuthorizationUtils.getHivePrivileges(privileges);
+  }
+
+  @Override
+  public HivePrivilegeObject getHivePrivilegeObject(
+      PrivilegeObjectDesc privSubjectDesc) throws HiveException {
+    return AuthorizationUtils.getHivePrivilegeObject(privSubjectDesc);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/02e762f9/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
index 86de47c..c387800 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
@@ -37,6 +37,9 @@ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
+import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
+import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
 import org.apache.hadoop.hive.ql.security.authorization.PrivilegeScope;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAccessController;
@@ -378,4 +381,21 @@ public class HiveV1Authorizer implements HiveAuthorizer {
     // do no filtering in old authorizer
     return listObjs;
   }
+
+  @Override
+  public List<HivePrincipal> getHivePrincipals(
+      List<PrincipalDesc> principals) throws HiveException {
+    return AuthorizationUtils.getHivePrincipals(principals);
+  }
+
+  @Override
+  public List<HivePrivilege> getHivePrivileges(List<PrivilegeDesc> privileges) {
+    return AuthorizationUtils.getHivePrivileges(privileges);
+  }
+
+  @Override
+  public HivePrivilegeObject getHivePrivilegeObject(
+      PrivilegeObjectDesc privSubjectDesc) throws HiveException {
+    return AuthorizationUtils.getHivePrivilegeObject(privSubjectDesc);
+  }
 }
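
For plugin authors, the point of the new hooks is that a custom authorizer can now substitute its own descriptor-to-object conversion instead of always going through the static AuthorizationUtils helpers. A minimal sketch, assuming HiveAuthorizerImpl's two-argument constructor; the CustomAuthorizer class and the adjustment comment are illustrative, not part of this patch:

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;

// Hypothetical authorizer that overrides only the privilege-object
// conversion; the other new hooks keep the inherited defaults.
public class CustomAuthorizer extends HiveAuthorizerImpl {

  public CustomAuthorizer(HiveAccessController accessController,
      HiveAuthorizationValidator authValidator) {
    super(accessController, authValidator);
  }

  @Override
  public HivePrivilegeObject getHivePrivilegeObject(
      PrivilegeObjectDesc privSubjectDesc) throws HiveException {
    // Start from the stock conversion, then adapt it to this
    // authorizer's object model (e.g. rewriting object names).
    HivePrivilegeObject obj = super.getHivePrivilegeObject(privSubjectDesc);
    return obj;
  }
}

Because DDLTask now calls authorizer.getHivePrivilegeObject(...) and friends rather than the static utility, such an override takes effect for GRANT/REVOKE and SHOW GRANT processing.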


[03/50] [abbrv] hive git commit: HIVE-10795: Remove use of PerfLogger from Orc (Owen O'Malley reviewed by Prasanth Jayachandran)

Posted by xu...@apache.org.
HIVE-10795: Remove use of PerfLogger from Orc (Owen O'Malley reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/527497cc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/527497cc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/527497cc

Branch: refs/heads/beeline-cli
Commit: 527497cc85144013ca0bb44fb3aa1fba9de8e052
Parents: 1280cca
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Jul 7 23:08:07 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue Jul 7 23:08:07 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   | 20 ++++++++++----------
 .../hive/ql/io/orc/OrcNewInputFormat.java       | 16 ++++++++++------
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |  1 -
 3 files changed, 20 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/527497cc/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 7346bc4..8864013 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
-import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -122,9 +121,6 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
   private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024;
   private static final long DEFAULT_MAX_SPLIT_SIZE = 256 * 1024 * 1024;
 
-  private static final PerfLogger perfLogger = PerfLogger.getPerfLogger();
-  private static final String CLASS_NAME = ReaderImpl.class.getName();
-
   /**
    * When picking the hosts for a split that crosses block boundaries,
    * drop any host that has fewer than MIN_INCLUDED_LOCATION of the
@@ -490,7 +486,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
       context.numFilesCounter.incrementAndGet();
       FileInfo fileInfo = Context.footerCache.getIfPresent(file.getPath());
       if (fileInfo != null) {
-        if (LOG.isDebugEnabled()) {
+        if (isDebugEnabled) {
           LOG.debug("Info cached for path: " + file.getPath());
         }
         if (fileInfo.modificationTime == file.getModificationTime() &&
@@ -501,7 +497,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
         } else {
           // Invalidate
           Context.footerCache.invalidate(file.getPath());
-          if (LOG.isDebugEnabled()) {
+          if (isDebugEnabled) {
             LOG.debug("Meta-Info for : " + file.getPath() +
                 " changed. CachedModificationTime: "
                 + fileInfo.modificationTime + ", CurrentModificationTime: "
@@ -511,7 +507,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
           }
         }
       } else {
-        if (LOG.isDebugEnabled()) {
+        if (isDebugEnabled) {
           LOG.debug("Info not cached for path: " + file.getPath());
         }
       }
@@ -871,7 +867,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
               includeStripe[i] = (i >= stripeStats.size()) ||
                   isStripeSatisfyPredicate(stripeStats.get(i), sarg,
                       filterColumns);
-              if (LOG.isDebugEnabled() && !includeStripe[i]) {
+              if (isDebugEnabled && !includeStripe[i]) {
                 LOG.debug("Eliminating ORC stripe-" + i + " of file '" +
                     file.getPath() + "'  as it did not satisfy " +
                     "predicate condition.");
@@ -1060,9 +1056,13 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
   @Override
   public InputSplit[] getSplits(JobConf job,
                                 int numSplits) throws IOException {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ORC_GET_SPLITS);
+    if (isDebugEnabled) {
+      LOG.debug("getSplits started");
+    }
     List<OrcSplit> result = generateSplitsInfo(job, numSplits);
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ORC_GET_SPLITS);
+    if (isDebugEnabled) {
+      LOG.debug("getSplits finished");
+    }
     return result.toArray(new InputSplit[result.size()]);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/527497cc/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
index b6ad0dc..1833d3d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
@@ -21,10 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
@@ -37,8 +37,8 @@ import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 /** An InputFormat for ORC files. Keys are meaningless,
  * value is the OrcStruct object */
 public class OrcNewInputFormat extends InputFormat<NullWritable, OrcStruct>{
-  private static final PerfLogger perfLogger = PerfLogger.getPerfLogger();
-  private static final String CLASS_NAME = ReaderImpl.class.getName();
+
+  private static final Log LOG = LogFactory.getLog(OrcNewInputFormat.class);
 
   @Override
   public RecordReader<NullWritable, OrcStruct> createRecordReader(
@@ -117,7 +117,9 @@ public class OrcNewInputFormat extends InputFormat<NullWritable, OrcStruct>{
   @Override
   public List<InputSplit> getSplits(JobContext jobContext)
       throws IOException, InterruptedException {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ORC_GET_SPLITS);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("getSplits started");
+    }
     List<OrcSplit> splits =
         OrcInputFormat.generateSplitsInfo(ShimLoader.getHadoopShims()
         .getConfiguration(jobContext));
@@ -125,7 +127,9 @@ public class OrcNewInputFormat extends InputFormat<NullWritable, OrcStruct>{
     for(OrcSplit split: splits) {
       result.add(new OrcNewSplit(split));
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ORC_GET_SPLITS);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("getSplits finished");
+    }
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/527497cc/ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
index f202991..20ca195 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
@@ -63,7 +63,6 @@ public class PerfLogger {
   public static final String TEZ_RUN_PROCESSOR = "TezRunProcessor";
   public static final String TEZ_INIT_OPERATORS = "TezInitializeOperators";
   public static final String LOAD_HASHTABLE = "LoadHashtable";
-  public static final String ORC_GET_SPLITS = "OrcGetSplits";
 
   public static final String SPARK_SUBMIT_TO_RUNNING = "SparkSubmitToRunning";
   public static final String SPARK_BUILD_PLAN = "SparkBuildPlan";
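
Note the pattern the OrcInputFormat hunks rely on: the class caches the debug flag in an isDebugEnabled field (presumably initialized from LOG.isDebugEnabled(); its declaration is outside these hunks) so the split-generation hot path does not re-query the logger. A self-contained sketch of the same guarded-logging pattern (the SplitLogDemo class name and its empty body are illustrative):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class SplitLogDemo {
  private static final Log LOG = LogFactory.getLog(SplitLogDemo.class);
  // Evaluated once at class load, as OrcInputFormat does, so the
  // per-call cost of disabled debug logging is a single boolean test.
  private static final boolean isDebugEnabled = LOG.isDebugEnabled();

  public void getSplits() {
    if (isDebugEnabled) {
      LOG.debug("getSplits started");
    }
    // ... split generation would happen here ...
    if (isDebugEnabled) {
      LOG.debug("getSplits finished");
    }
  }

  public static void main(String[] args) {
    new SplitLogDemo().getSplits();
  }
}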


[27/50] [abbrv] hive git commit: HIVE-11129: Issue a warning when data is copied from UTF-8 to ISO 8859-1 (Aihua Xu via Szehon)

Posted by xu...@apache.org.
HIVE-11129: Issue a warning when data is copied from UTF-8 to ISO 8859-1 (Aihua Xu via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2620ebbc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2620ebbc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2620ebbc

Branch: refs/heads/beeline-cli
Commit: 2620ebbc6722a31d40f8f0d1267a9e21cbe19470
Parents: 3301b92
Author: Szehon Ho <sz...@cloudera.com>
Authored: Mon Jul 13 11:46:56 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Mon Jul 13 11:46:56 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/serde2/AbstractEncodingAwareSerDe.java | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2620ebbc/serde/src/java/org/apache/hadoop/hive/serde2/AbstractEncodingAwareSerDe.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/AbstractEncodingAwareSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/AbstractEncodingAwareSerDe.java
index 3668c56..efc4c7e 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/AbstractEncodingAwareSerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/AbstractEncodingAwareSerDe.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hive.serde2;
 import java.nio.charset.Charset;
 import java.util.Properties;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -34,7 +36,7 @@ import com.google.common.base.Charsets;
  * transform data from UTF-8 to specified charset during deserialize.
  */
 public abstract class AbstractEncodingAwareSerDe extends AbstractSerDe {
-
+  private static final Log LOG = LogFactory.getLog(AbstractEncodingAwareSerDe.class);
   protected Charset charset;
 
   @Override
@@ -42,6 +44,9 @@ public abstract class AbstractEncodingAwareSerDe extends AbstractSerDe {
   public void initialize(Configuration conf, Properties tbl)
       throws SerDeException {
     charset = Charset.forName(tbl.getProperty(serdeConstants.SERIALIZATION_ENCODING, "UTF-8"));
+    if (this.charset.equals(Charsets.ISO_8859_1) || this.charset.equals(Charsets.US_ASCII)) {
+      LOG.warn("The data may not be properly converted to target charset " + charset);
+    }
   }
 
   @Override
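
The warning exists because serializing to a narrower charset is lossy: characters outside ISO 8859-1 or US-ASCII are silently replaced. A standalone demonstration of the loss (using java.nio's StandardCharsets, where the patch itself imports Guava's Charsets):

import java.nio.charset.StandardCharsets;

public class CharsetLossDemo {
  public static void main(String[] args) {
    String source = "caf\u00e9 \u4e2d";  // "café" plus one CJK character
    // Encoding to ISO 8859-1 replaces the unmappable CJK character
    // with '?' -- exactly the silent corruption the new warning flags.
    byte[] latin1 = source.getBytes(StandardCharsets.ISO_8859_1);
    System.out.println(new String(latin1, StandardCharsets.ISO_8859_1));
    // prints: café ?
  }
}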


[14/50] [abbrv] hive git commit: HIVE-11193: ConstantPropagateProcCtx should use a Set instead of a List to hold operators to be deleted (Wei Zheng via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11193: ConstantPropagateProcCtx should use a Set instead of a List to hold operators to be deleted (Wei Zheng via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d3144256
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d3144256
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d3144256

Branch: refs/heads/beeline-cli
Commit: d3144256d90d78392c897a63e58b512b46ab1608
Parents: 8207489
Author: Wei Zheng <wz...@hortonworks.com>
Authored: Thu Jul 9 13:26:00 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Jul 9 16:52:54 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/optimizer/ConstantPropagate.java    |   1 +
 .../ql/optimizer/ConstantPropagateProcCtx.java  |  10 +-
 .../test/queries/clientpositive/constprog_dpp.q |  17 +++
 .../clientpositive/tez/constprog_dpp.q.out      | 113 +++++++++++++++++++
 5 files changed, 137 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d3144256/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 441b278..8773bd3 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -318,6 +318,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
 
 minitez.query.files=bucket_map_join_tez1.q,\
   bucket_map_join_tez2.q,\
+  constprog_dpp.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\
   explainuser_1.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/d3144256/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
index b5ee4ef..dd53ced 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
@@ -125,6 +125,7 @@ public class ConstantPropagate implements Transform {
       }
       opToDelete.getParentOperators().get(0).removeChildAndAdoptItsChildren(opToDelete);
     }
+    cppCtx.getOpToDelete().clear();
     return pGraphContext;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d3144256/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index f30e330..d0b10c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hive.ql.optimizer;
 
 
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -53,7 +53,7 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
       .getLog(ConstantPropagateProcCtx.class);
 
   private final Map<Operator<? extends Serializable>, Map<ColumnInfo, ExprNodeDesc>> opToConstantExprs;
-  private final List<Operator<? extends Serializable>> opToDelete;
+  private final Set<Operator<? extends Serializable>> opToDelete;
   private ConstantPropagateOption constantPropagateOption = ConstantPropagateOption.FULL;
 
   public ConstantPropagateProcCtx() {
@@ -63,7 +63,7 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
   public ConstantPropagateProcCtx(ConstantPropagateOption option) {
     opToConstantExprs =
         new HashMap<Operator<? extends Serializable>, Map<ColumnInfo, ExprNodeDesc>>();
-    opToDelete = new ArrayList<Operator<? extends Serializable>>();
+    opToDelete = new HashSet<Operator<? extends Serializable>>();
     this.constantPropagateOption = option;
   }
 
@@ -193,7 +193,7 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
     opToDelete.add(op);
   }
 
-  public List<Operator<? extends Serializable>> getOpToDelete() {
+  public Set<Operator<? extends Serializable>> getOpToDelete() {
     return opToDelete;
   }
 

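Why a Set matters here: during constant propagation the same operator can be marked for deletion more than once, and removeChildAndAdoptItsChildren must not run twice on an already-detached operator. A toy sketch of the difference (String stands in for Operator; all names are illustrative):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DedupDemo {
  public static void main(String[] args) {
    String op = "LIM_7";  // stand-in for an operator marked twice

    List<String> asList = new ArrayList<String>();
    asList.add(op);
    asList.add(op);                     // a List keeps both entries
    System.out.println(asList.size());  // 2: would detach the operator twice

    Set<String> asSet = new HashSet<String>();
    asSet.add(op);
    asSet.add(op);                      // a Set drops the duplicate
    System.out.println(asSet.size());   // 1: detached exactly once
  }
}

The added clear() call in ConstantPropagate serves a related goal: it empties the context after the deletion loop so a later pass does not see stale operators.
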
http://git-wip-us.apache.org/repos/asf/hive/blob/d3144256/ql/src/test/queries/clientpositive/constprog_dpp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/constprog_dpp.q b/ql/src/test/queries/clientpositive/constprog_dpp.q
new file mode 100644
index 0000000..f1c9b13
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/constprog_dpp.q
@@ -0,0 +1,17 @@
+set hive.execution.engine=tez;
+set hive.optimize.constant.propagation=true;
+set hive.tez.dynamic.partition.pruning=true;
+
+drop table if exists tb1;
+create table tb1 (id int);
+
+drop table if exists tb2;
+create table tb2 (id smallint);
+
+explain
+select a.id from tb1 a
+left outer join
+(select id from tb2
+union all
+select 2 as id from tb2 limit 1) b
+on a.id=b.id;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d3144256/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out b/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
new file mode 100644
index 0000000..b2b2371
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
@@ -0,0 +1,113 @@
+PREHOOK: query: drop table if exists tb1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists tb1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tb1 (id int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tb1
+POSTHOOK: query: create table tb1 (id int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tb1
+PREHOOK: query: drop table if exists tb2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists tb2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tb2 (id smallint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tb2
+POSTHOOK: query: create table tb2 (id smallint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tb2
+PREHOOK: query: explain
+select a.id from tb1 a
+left outer join
+(select id from tb2
+union all
+select 2 as id from tb2 limit 1) b
+on a.id=b.id
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.id from tb1 a
+left outer join
+(select id from tb2
+union all
+select 2 as id from tb2 limit 1) b
+on a.id=b.id
+POSTHOOK: type: QUERY
+Plan not optimized by CBO due to missing statistics. Please check log for more details.
+
+Vertex dependency in root stage
+Map 1 <- Union 2 (CONTAINS)
+Map 5 <- Union 2 (CONTAINS)
+Reducer 3 <- Union 2 (SIMPLE_EDGE)
+Reducer 4 <- Map 6 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+
+Stage-0
+   Fetch Operator
+      limit:-1
+      Stage-1
+         Reducer 4
+         File Output Operator [FS_16]
+            compressed:false
+            Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            Merge Join Operator [MERGEJOIN_20]
+            |  condition map:[{"":"Left Outer Join0 to 1"}]
+            |  keys:{"1":"_col0 (type: int)","0":"id (type: int)"}
+            |  outputColumnNames:["_col0"]
+            |  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            |<-Map 6 [SIMPLE_EDGE]
+            |  Reduce Output Operator [RS_12]
+            |     key expressions:id (type: int)
+            |     Map-reduce partition columns:id (type: int)
+            |     sort order:+
+            |     Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            |     TableScan [TS_11]
+            |        alias:a
+            |        Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            |<-Reducer 3 [SIMPLE_EDGE]
+               Reduce Output Operator [RS_13]
+                  key expressions:_col0 (type: int)
+                  Map-reduce partition columns:_col0 (type: int)
+                  sort order:+
+                  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  Limit [LIM_10]
+                     Number of rows:1
+                     Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                     Select Operator [SEL_9]
+                     |  outputColumnNames:["_col0"]
+                     |  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                     |<-Union 2 [SIMPLE_EDGE]
+                        |<-Map 1 [CONTAINS]
+                        |  Reduce Output Operator [RS_8]
+                        |     sort order:
+                        |     Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        |     value expressions:_col0 (type: int)
+                        |     Limit [LIM_7]
+                        |        Number of rows:1
+                        |        Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        |        Select Operator [SEL_1]
+                        |           outputColumnNames:["_col0"]
+                        |           Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        |           TableScan [TS_0]
+                        |              alias:tb2
+                        |              Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        |<-Map 5 [CONTAINS]
+                           Reduce Output Operator [RS_8]
+                              sort order:
+                              Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                              value expressions:_col0 (type: int)
+                              Limit [LIM_7]
+                                 Number of rows:1
+                                 Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                                 Select Operator [SEL_3]
+                                    outputColumnNames:["_col0"]
+                                    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                                    TableScan [TS_2]
+                                       alias:tb2
+                                       Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+


[44/50] [abbrv] hive git commit: HIVE-11145: Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
index 4a15fae..bec4f6a 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
@@ -137,8 +137,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
index 177da44..646f8b8 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
@@ -142,8 +142,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
index d0eb853..fabd0d6 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
@@ -160,8 +160,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge3.q.out b/ql/src/test/results/clientpositive/merge3.q.out
index 2ca40f4..b237631 100644
--- a/ql/src/test/results/clientpositive/merge3.q.out
+++ b/ql/src/test/results/clientpositive/merge3.q.out
@@ -2361,7 +2361,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out
index a61a2e6..6eb9a93 100644
--- a/ql/src/test/results/clientpositive/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/orc_analyze.q.out
@@ -98,7 +98,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -147,7 +146,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -196,7 +194,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -286,7 +283,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -404,8 +400,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -449,8 +443,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -506,8 +498,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -551,8 +541,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -608,8 +596,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -653,8 +639,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -755,8 +739,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -800,8 +782,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -922,8 +902,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -967,8 +945,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1024,8 +1000,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1069,8 +1043,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1126,8 +1098,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1171,8 +1141,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1279,8 +1247,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1324,8 +1290,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1491,8 +1455,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1536,8 +1498,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -1595,8 +1555,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1640,8 +1598,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -1699,8 +1655,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1744,8 +1698,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/orc_create.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_create.q.out b/ql/src/test/results/clientpositive/orc_create.q.out
index bffb58d..e294dec 100644
--- a/ql/src/test/results/clientpositive/orc_create.q.out
+++ b/ql/src/test/results/clientpositive/orc_create.q.out
@@ -62,7 +62,6 @@ strct               	struct<A:string,B:string>
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -113,7 +112,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -167,7 +165,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -211,7 +208,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -265,7 +261,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -316,7 +311,6 @@ strct               	struct<A:string,B:string>
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_orderby.q.out b/ql/src/test/results/clientpositive/parallel_orderby.q.out
index 2f4ac8f..4b7de52 100644
--- a/ql/src/test/results/clientpositive/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/parallel_orderby.q.out
@@ -105,7 +105,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -219,7 +218,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/parquet_array_null_element.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_array_null_element.q.out b/ql/src/test/results/clientpositive/parquet_array_null_element.q.out
index 4243131..387f01e 100644
--- a/ql/src/test/results/clientpositive/parquet_array_null_element.q.out
+++ b/ql/src/test/results/clientpositive/parquet_array_null_element.q.out
@@ -66,7 +66,6 @@ mp                  	map<string,string>
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/parquet_create.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_create.q.out b/ql/src/test/results/clientpositive/parquet_create.q.out
index 32c4db8..c6d33ff 100644
--- a/ql/src/test/results/clientpositive/parquet_create.q.out
+++ b/ql/src/test/results/clientpositive/parquet_create.q.out
@@ -69,7 +69,6 @@ strct               	struct<A:string,B:string>
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/parquet_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_partitioned.q.out b/ql/src/test/results/clientpositive/parquet_partitioned.q.out
index 2aafb29..3529d70 100644
--- a/ql/src/test/results/clientpositive/parquet_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/parquet_partitioned.q.out
@@ -63,7 +63,6 @@ part                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/parquet_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_serde.q.out b/ql/src/test/results/clientpositive/parquet_serde.q.out
index e753180..fb2344a 100644
--- a/ql/src/test/results/clientpositive/parquet_serde.q.out
+++ b/ql/src/test/results/clientpositive/parquet_serde.q.out
@@ -70,8 +70,6 @@ Partition Value:    	[20140330]
 Database:           	default             	 
 Table:              	parquet_mixed_fileformat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -137,7 +135,6 @@ dateint             	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -177,8 +174,6 @@ Partition Value:    	[20140330]
 Database:           	default             	 
 Table:              	parquet_mixed_fileformat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out
index 144d89e..82f4750 100644
--- a/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out
+++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out
@@ -37,8 +37,6 @@ Partition Value:    	[v1]
 Database:           	default             	 
 Table:              	mytbl               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	a                   	myval               
 	b                   	yourval             

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out
index 758712f..e4b8003 100644
--- a/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out
+++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out
@@ -33,8 +33,6 @@ Partition Value:    	[v1]
 Database:           	default             	 
 Table:              	mytbl               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out
index 85ad687..106448a 100644
--- a/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out
+++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out
@@ -37,8 +37,6 @@ Partition Value:    	[v1]
 Database:           	default             	 
 Table:              	mytbl               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	a                   	myval               
 	b                   	yourval             

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
index 9e594c9..86099cb 100644
--- a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
+++ b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
@@ -44,8 +44,6 @@ Partition Value:    	[100, 20000, 300000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 #### A masked pattern was here ####
 	 	 
@@ -93,8 +91,6 @@ Partition Value:    	[100, 20000, 300000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -148,8 +144,6 @@ Partition Value:    	[100, 20000, 300000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -204,8 +198,6 @@ Partition Value:    	[110, 22000, 330000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -375,8 +367,6 @@ Partition Value:    	[110, 22000, 330000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 #### A masked pattern was here ####
@@ -428,8 +418,6 @@ Partition Value:    	[110, 22000, 330000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 #### A masked pattern was here ####
@@ -509,8 +497,6 @@ Partition Value:    	[100, 20000, 300000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -562,8 +548,6 @@ Partition Value:    	[100, 20000, 300000000000]
 Database:           	default             	 
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/protectmode2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/protectmode2.q.out b/ql/src/test/results/clientpositive/protectmode2.q.out
index 4ac3e61..ee04393 100644
--- a/ql/src/test/results/clientpositive/protectmode2.q.out
+++ b/ql/src/test/results/clientpositive/protectmode2.q.out
@@ -136,7 +136,6 @@ p                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	NO_DROP_CASCADE     	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -180,7 +179,6 @@ p                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/rcfile_default_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rcfile_default_format.q.out b/ql/src/test/results/clientpositive/rcfile_default_format.q.out
index e584c8b..ee58509 100644
--- a/ql/src/test/results/clientpositive/rcfile_default_format.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_default_format.q.out
@@ -19,7 +19,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -60,7 +59,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -112,7 +110,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -158,7 +155,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -204,7 +200,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -247,7 +242,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -288,7 +282,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -331,7 +324,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/selectDistinctStar.q.out
index 8867b8d..45d9882 100644
--- a/ql/src/test/results/clientpositive/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/selectDistinctStar.q.out
@@ -1363,7 +1363,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -3794,7 +3793,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
index 43cc4ef..cefe069 100644
--- a/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
+++ b/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
@@ -85,7 +85,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -137,7 +136,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -242,8 +240,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -293,8 +289,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -352,8 +346,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket5.q.out b/ql/src/test/results/clientpositive/spark/bucket5.q.out
index 45f2ace..a72db3b 100644
--- a/ql/src/test/results/clientpositive/spark/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket5.q.out
@@ -370,7 +370,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index cebe3ab..6bb2f76 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -142,7 +142,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -286,7 +285,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -430,7 +428,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -495,7 +492,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -640,7 +636,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
index 2852ae9..5573c0a 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_bucketed 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out
index 3d4eb18..05bbc26 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -110,8 +108,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	0                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
index 94adb3d..f39cd57 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
@@ -144,8 +144,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -315,8 +313,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -432,8 +428,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -571,8 +565,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out
index d947eb5..4db9e35 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -104,8 +102,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
index c57530e..b9dc290 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
@@ -154,8 +154,6 @@ Partition Value:    	[2008-04-08, 0]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -196,8 +194,6 @@ Partition Value:    	[2008-04-08, 1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out
index c9072e5..15b7992 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	5                   
@@ -104,8 +102,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	5                   
@@ -159,8 +155,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -214,8 +208,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	7                   
@@ -269,8 +261,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	5                   
@@ -326,8 +316,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
index d765eaf..2f84758 100644
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
@@ -255,8 +255,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
index 52a76e6..12f41eb 100644
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
@@ -255,8 +255,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
index e38ccf8..11ffff8 100644
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
@@ -315,8 +315,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
index dade6bb..23dc6a3 100644
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
@@ -315,8 +315,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
index 17bd5bb..3ee9b5a 100644
Binary files a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out and b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out differ

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_analyze.q.out b/ql/src/test/results/clientpositive/spark/orc_analyze.q.out
index 60a23d4..121142e 100644
--- a/ql/src/test/results/clientpositive/spark/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_analyze.q.out
@@ -98,7 +98,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -188,7 +187,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -306,8 +304,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -351,8 +347,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -453,8 +447,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -498,8 +490,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -620,8 +610,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -665,8 +653,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -773,8 +759,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -818,8 +802,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -985,8 +967,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1030,8 +1010,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
index 03314ea..308b82c 100644
--- a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
@@ -111,7 +111,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -225,7 +224,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats1.q.out b/ql/src/test/results/clientpositive/spark/stats1.q.out
index ec2edc4..ab1f993 100644
--- a/ql/src/test/results/clientpositive/spark/stats1.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats1.q.out
@@ -170,7 +170,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -220,7 +219,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats10.q.out b/ql/src/test/results/clientpositive/spark/stats10.q.out
index c840ab7..9c5090f 100644
--- a/ql/src/test/results/clientpositive/spark/stats10.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats10.q.out
@@ -422,8 +422,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	bucket3_1           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -463,8 +461,6 @@ Partition Value:    	[2]
 Database:           	default             	 
 Table:              	bucket3_1           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -502,7 +498,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats12.q.out b/ql/src/test/results/clientpositive/spark/stats12.q.out
index db575df..4a5f075 100644
--- a/ql/src/test/results/clientpositive/spark/stats12.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats12.q.out
@@ -208,7 +208,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -247,8 +246,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -289,8 +286,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -331,8 +326,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -373,8 +366,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats13.q.out b/ql/src/test/results/clientpositive/spark/stats13.q.out
index f38f876..452d4bc 100644
--- a/ql/src/test/results/clientpositive/spark/stats13.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats13.q.out
@@ -158,7 +158,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -197,8 +196,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -239,8 +236,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -281,8 +276,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -323,8 +316,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -371,7 +362,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats14.q.out b/ql/src/test/results/clientpositive/spark/stats14.q.out
index f12b136..f34720d 100644
--- a/ql/src/test/results/clientpositive/spark/stats14.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats14.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats15.q.out b/ql/src/test/results/clientpositive/spark/stats15.q.out
index a60dee2..aad2e3a 100644
--- a/ql/src/test/results/clientpositive/spark/stats15.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats15.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats16.q.out b/ql/src/test/results/clientpositive/spark/stats16.q.out
index 3f0f2ea..2e3cadb 100644
--- a/ql/src/test/results/clientpositive/spark/stats16.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats16.q.out
@@ -24,7 +24,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -73,7 +72,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats18.q.out b/ql/src/test/results/clientpositive/spark/stats18.q.out
index a061846..a7d6ab8 100644
--- a/ql/src/test/results/clientpositive/spark/stats18.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats18.q.out
@@ -44,8 +44,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -94,8 +92,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats2.q.out b/ql/src/test/results/clientpositive/spark/stats2.q.out
index ddc8226..404e34b 100644
--- a/ql/src/test/results/clientpositive/spark/stats2.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats2.q.out
@@ -99,7 +99,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -182,7 +181,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats20.q.out b/ql/src/test/results/clientpositive/spark/stats20.q.out
index 4ac7bc5..d7e52b4 100644
--- a/ql/src/test/results/clientpositive/spark/stats20.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats20.q.out
@@ -39,7 +39,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -89,7 +88,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats3.q.out b/ql/src/test/results/clientpositive/spark/stats3.q.out
index dd3a95b..2afb76e 100644
--- a/ql/src/test/results/clientpositive/spark/stats3.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats3.q.out
@@ -82,7 +82,6 @@ col1                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -227,7 +226,6 @@ pcol2               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats5.q.out b/ql/src/test/results/clientpositive/spark/stats5.q.out
index b61101d..9748469 100644
--- a/ql/src/test/results/clientpositive/spark/stats5.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats5.q.out
@@ -52,7 +52,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats6.q.out b/ql/src/test/results/clientpositive/spark/stats6.q.out
index b4435f2..a387075 100644
--- a/ql/src/test/results/clientpositive/spark/stats6.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats6.q.out
@@ -79,8 +79,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -121,8 +119,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -163,8 +159,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -205,8 +199,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -245,7 +237,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats7.q.out b/ql/src/test/results/clientpositive/spark/stats7.q.out
index 0e49e33..0e095fc 100644
--- a/ql/src/test/results/clientpositive/spark/stats7.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats7.q.out
@@ -93,8 +93,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -135,8 +133,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -175,7 +171,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats8.q.out b/ql/src/test/results/clientpositive/spark/stats8.q.out
index 0213e10..3f4ed63 100644
--- a/ql/src/test/results/clientpositive/spark/stats8.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats8.q.out
@@ -89,8 +89,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -129,7 +127,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -202,8 +199,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -278,8 +273,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -354,8 +347,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -442,8 +433,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -484,8 +473,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -526,8 +513,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -568,8 +553,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -608,7 +591,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats9.q.out b/ql/src/test/results/clientpositive/spark/stats9.q.out
index 2c7daea..7eae829 100644
--- a/ql/src/test/results/clientpositive/spark/stats9.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats9.q.out
@@ -60,7 +60,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_counter.q.out b/ql/src/test/results/clientpositive/spark/stats_counter.q.out
index e2980e8..8b3dcea 100644
--- a/ql/src/test/results/clientpositive/spark/stats_counter.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_counter.q.out
@@ -32,7 +32,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -80,7 +79,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 


[30/50] [abbrv] hive git commit: HIVE-11240 Change value type from int to long for HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE (Ferdinand Xu, reviewed by Xuefu Zhang)

Posted by xu...@apache.org.
HIVE-11240 Change value type from int to long for HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE (Ferdinand Xu, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/21aecbcf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/21aecbcf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/21aecbcf

Branch: refs/heads/beeline-cli
Commit: 21aecbcfff21d33f1785221be73f880d728632fa
Parents: 5363af9
Author: Ferdinand Xu <ch...@intel.com>
Authored: Tue Jul 14 02:45:54 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Tue Jul 14 02:45:54 2015 -0400

----------------------------------------------------------------------
 .../src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/21aecbcf/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 920e762..2ef5aa0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5995,7 +5995,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       // Server will create new threads up to max as necessary. After an idle
       // period, it will destroy threads to keep the number of threads in the
       // pool to min.
-      int maxMessageSize = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE);
+      long maxMessageSize = conf.getLongVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE);
       int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
       int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
       boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
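
For context on why the widening matters: a byte-size setting above Integer.MAX_VALUE (about 2.1 GB) simply cannot be carried in an int. A minimal standalone sketch, not part of the commit, using a hypothetical 3 GiB value:

public class MaxMessageSizeDemo {
    public static void main(String[] args) {
        // Hypothetical configured value: 3 GiB, above Integer.MAX_VALUE.
        String configured = "3221225472";

        // A long holds the value with room to spare.
        long asLong = Long.parseLong(configured);
        System.out.println("as long: " + asLong);

        // Parsing the same value as an int fails outright.
        try {
            System.out.println("as int: " + Integer.parseInt(configured));
        } catch (NumberFormatException e) {
            System.out.println("int parse failed: " + e.getMessage());
        }
    }
}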


[09/50] [abbrv] hive git commit: HIVE-10895: ObjectStore does not close Query objects in some calls, causing a potential leak in some metastore db resources (Aihua Xu, reviewed by Chaoyu Tang, Sergey Shelukhin, Vaibhav Gumashta)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/08595ffa/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 8f52f83..39ab9e7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -217,6 +217,29 @@ public class ObjectStore implements RawStore, Configurable {
 
   private Pattern partitionValidationPattern;
 
+  /**
+   * A class to pass the Query object to the caller to let the caller release
+   * resources by calling QueryWrapper.query.closeAll() after consuming all the query results.
+   */
+  public static class QueryWrapper {
+    public Query query;
+
+    /**
+     * Explicitly closes the query object to release the resources
+     */
+    public void close() {
+      if (query != null) {
+        query.closeAll();
+        query = null;
+      }
+    }
+
+    @Override
+    protected void finalize() {
+      this.close();
+    }
+  }
+
   public ObjectStore() {
   }
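
The intended calling convention for the new QueryWrapper, as a hedged sketch rather than code from the patch: the callee parks its Query in the wrapper instead of closing it, and the caller closes after consuming the results. Here `pm` is assumed to be an already-initialized javax.jdo PersistenceManager and MDatabase stands in for any persistent model class.

import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class QueryWrapperUsageSketch {
    // Trimmed copy of the wrapper introduced above.
    public static class QueryWrapper {
        public Query query;
        public void close() {
            if (query != null) {
                query.closeAll();
                query = null;
            }
        }
    }

    static Object findDatabase(PersistenceManager pm, String name) {
        QueryWrapper wrapper = new QueryWrapper();
        try {
            // The query outlives the lookup so its results stay readable...
            Query q = wrapper.query = pm.newQuery("SELECT FROM MDatabase WHERE name == :n");
            q.setUnique(true);
            return q.execute(name);
        } finally {
            wrapper.close(); // ...and the caller releases it exactly once
        }
    }
}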
 
@@ -551,10 +574,11 @@ public class ObjectStore implements RawStore, Configurable {
   private MDatabase getMDatabase(String name) throws NoSuchObjectException {
     MDatabase mdb = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
       name = HiveStringUtils.normalizeIdentifier(name);
-      Query query = pm.newQuery(MDatabase.class, "name == dbname");
+      query = pm.newQuery(MDatabase.class, "name == dbname");
       query.declareParameters("java.lang.String dbname");
       query.setUnique(true);
       mdb = (MDatabase) query.execute(name);
@@ -564,6 +588,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     if (mdb == null) {
       throw new NoSuchObjectException("There is no database named " + name);
@@ -666,6 +693,7 @@ public class ObjectStore implements RawStore, Configurable {
     boolean success = false;
     LOG.info("Dropping database " + dbname + " along with all tables");
     dbname = HiveStringUtils.normalizeIdentifier(dbname);
+    QueryWrapper queryWrapper = new QueryWrapper();
     try {
       openTransaction();
 
@@ -673,7 +701,7 @@ public class ObjectStore implements RawStore, Configurable {
       MDatabase db = getMDatabase(dbname);
       pm.retrieve(db);
       if (db != null) {
-        List<MDBPrivilege> dbGrants = this.listDatabaseGrants(dbname);
+        List<MDBPrivilege> dbGrants = this.listDatabaseGrants(dbname, queryWrapper);
         if (dbGrants != null && dbGrants.size() > 0) {
           pm.deletePersistentAll(dbGrants);
         }
@@ -684,36 +712,36 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      queryWrapper.close();
     }
     return success;
   }
 
-
   @Override
   public List<String> getDatabases(String pattern) throws MetaException {
     boolean commited = false;
     List<String> databases = null;
+    Query query = null;
     try {
       openTransaction();
       // Take the pattern and split it on the | to get all the composing
       // patterns
       String[] subpatterns = pattern.trim().split("\\|");
-      String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where (";
+      String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where (";
       boolean first = true;
       for (String subpattern : subpatterns) {
         subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
         if (!first) {
-          query = query + " || ";
+          queryStr = queryStr + " || ";
         }
-        query = query + " name.matches(\"" + subpattern + "\")";
+        queryStr = queryStr + " name.matches(\"" + subpattern + "\")";
         first = false;
       }
-      query = query + ")";
-
-      Query q = pm.newQuery(query);
-      q.setResult("name");
-      q.setOrdering("name ascending");
-      Collection names = (Collection) q.execute();
+      queryStr = queryStr + ")";
+      query = pm.newQuery(queryStr);
+      query.setResult("name");
+      query.setOrdering("name ascending");
+      Collection names = (Collection) query.execute();
       databases = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         databases.add((String) i.next());
@@ -723,6 +751,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return databases;
   }
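
The string-building change above keeps the original pattern semantics: `*` is a wildcard, `|` separates alternatives, and each alternative matches case-insensitively. A standalone sketch of that translation, using java.util.regex in place of JDOQL matches():

import java.util.ArrayList;
import java.util.List;

public class MetastorePatternDemo {
    // Translate a metastore-style pattern ("def*|tmp_*") into regex
    // alternatives and filter names with it, mirroring getDatabases().
    static List<String> filter(List<String> names, String pattern) {
        String[] subpatterns = pattern.trim().split("\\|");
        List<String> out = new ArrayList<>();
        for (String name : names) {
            for (String sub : subpatterns) {
                String regex = "(?i)" + sub.replaceAll("\\*", ".*");
                if (name.matches(regex)) {
                    out.add(name);
                    break;
                }
            }
        }
        return out;
    }

    public static void main(String[] args) {
        List<String> dbs = List.of("default", "tmp_etl", "sales");
        System.out.println(filter(dbs, "def*|tmp_*")); // [default, tmp_etl]
    }
}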
@@ -781,9 +812,10 @@ public class ObjectStore implements RawStore, Configurable {
   public Type getType(String typeName) {
     Type type = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
-      Query query = pm.newQuery(MType.class, "name == typeName");
+      query = pm.newQuery(MType.class, "name == typeName");
       query.declareParameters("java.lang.String typeName");
       query.setUnique(true);
       MType mtype = (MType) query.execute(typeName.trim());
@@ -796,6 +828,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return type;
   }
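
getType shows the mechanical fix this patch applies throughout: hoist the Query declaration out of the try block so the finally clause can both roll back an uncommitted transaction and close the query. A compilable sketch with plain javax.jdo transactions standing in for ObjectStore's openTransaction/commitTransaction helpers; MType is assumed to be a persistent class:

import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class CloseInFinallySketch {
    static Object getByName(PersistenceManager pm, String typeName) {
        boolean committed = false;
        Query query = null;
        try {
            pm.currentTransaction().begin();
            query = pm.newQuery("SELECT FROM MType WHERE name == :n");
            query.setUnique(true);
            Object result = query.execute(typeName);
            pm.currentTransaction().commit();
            committed = true;
            return result;
        } finally {
            if (!committed && pm.currentTransaction().isActive()) {
                pm.currentTransaction().rollback();
            }
            if (query != null) {
                query.closeAll(); // frees result sets held by the query
            }
        }
    }
}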
@@ -803,9 +838,10 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public boolean dropType(String typeName) {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
-      Query query = pm.newQuery(MType.class, "name == typeName");
+      query = pm.newQuery(MType.class, "name == typeName");
       query.declareParameters("java.lang.String typeName");
       query.setUnique(true);
       MType type = (MType) query.execute(typeName.trim());
@@ -821,6 +857,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return success;
   }
@@ -956,9 +995,9 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public List<String> getTables(String dbName, String pattern)
-      throws MetaException {
+  public List<String> getTables(String dbName, String pattern) throws MetaException {
     boolean commited = false;
+    Query query = null;
     List<String> tbls = null;
     try {
       openTransaction();
@@ -966,25 +1005,24 @@ public class ObjectStore implements RawStore, Configurable {
       // Take the pattern and split it on the | to get all the composing
       // patterns
       String[] subpatterns = pattern.trim().split("\\|");
-      String query =
-        "select tableName from org.apache.hadoop.hive.metastore.model.MTable "
-        + "where database.name == dbName && (";
+      String queryStr =
+          "select tableName from org.apache.hadoop.hive.metastore.model.MTable "
+              + "where database.name == dbName && (";
       boolean first = true;
       for (String subpattern : subpatterns) {
         subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
         if (!first) {
-          query = query + " || ";
+          queryStr = queryStr + " || ";
         }
-        query = query + " tableName.matches(\"" + subpattern + "\")";
+        queryStr = queryStr + " tableName.matches(\"" + subpattern + "\")";
         first = false;
       }
-      query = query + ")";
-
-      Query q = pm.newQuery(query);
-      q.declareParameters("java.lang.String dbName");
-      q.setResult("tableName");
-      q.setOrdering("tableName ascending");
-      Collection names = (Collection) q.execute(dbName);
+      queryStr = queryStr + ")";
+      query = pm.newQuery(queryStr);
+      query.declareParameters("java.lang.String dbName");
+      query.setResult("tableName");
+      query.setOrdering("tableName ascending");
+      Collection names = (Collection) query.execute(dbName);
       tbls = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         tbls.add((String) i.next());
@@ -994,6 +1032,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return tbls;
   }
@@ -1006,11 +1047,12 @@ public class ObjectStore implements RawStore, Configurable {
   private MTable getMTable(String db, String table) {
     MTable mtbl = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
       db = HiveStringUtils.normalizeIdentifier(db);
       table = HiveStringUtils.normalizeIdentifier(table);
-      Query query = pm.newQuery(MTable.class, "tableName == table && database.name == db");
+      query = pm.newQuery(MTable.class, "tableName == table && database.name == db");
       query.declareParameters("java.lang.String table, java.lang.String db");
       query.setUnique(true);
       mtbl = (MTable) query.execute(table, db);
@@ -1020,20 +1062,24 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mtbl;
   }
 
   @Override
-  public List<Table> getTableObjectsByName(String db, List<String> tbl_names)
-      throws MetaException, UnknownDBException {
+  public List<Table> getTableObjectsByName(String db, List<String> tbl_names) throws MetaException,
+      UnknownDBException {
     List<Table> tables = new ArrayList<Table>();
     boolean committed = false;
+    Query dbExistsQuery = null;
+    Query query = null;
     try {
       openTransaction();
-
       db = HiveStringUtils.normalizeIdentifier(db);
-      Query dbExistsQuery = pm.newQuery(MDatabase.class, "name == db");
+      dbExistsQuery = pm.newQuery(MDatabase.class, "name == db");
       dbExistsQuery.declareParameters("java.lang.String db");
       dbExistsQuery.setUnique(true);
       dbExistsQuery.setResult("name");
@@ -1046,7 +1092,7 @@ public class ObjectStore implements RawStore, Configurable {
       for (String t : tbl_names) {
         lowered_tbl_names.add(HiveStringUtils.normalizeIdentifier(t));
       }
-      Query query = pm.newQuery(MTable.class);
+      query = pm.newQuery(MTable.class);
       query.setFilter("database.name == db && tbl_names.contains(tableName)");
       query.declareParameters("java.lang.String db, java.util.Collection tbl_names");
       Collection mtables = (Collection) query.execute(db, lowered_tbl_names);
@@ -1058,6 +1104,12 @@ public class ObjectStore implements RawStore, Configurable {
       if (!committed) {
         rollbackTransaction();
       }
+      if (dbExistsQuery != null) {
+        dbExistsQuery.closeAll();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return tables;
   }
@@ -1208,9 +1260,9 @@ public class ObjectStore implements RawStore, Configurable {
 
   // MSD and SD should be same objects. Not sure how to make then same right now
   // MSerdeInfo *& SerdeInfo should be same as well
-  private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd,
-      boolean noFS)
-      throws MetaException {
+  private StorageDescriptor convertToStorageDescriptor(
+      MStorageDescriptor msd,
+      boolean noFS) throws MetaException {
     if (msd == null) {
       return null;
     }
@@ -1296,8 +1348,6 @@ public class ObjectStore implements RawStore, Configurable {
     return map;
   }
 
-
-
   /**
    * Converts a storage descriptor to a db-backed storage descriptor.  Creates a
    *   new db-backed column descriptor object for this SD.
@@ -1404,7 +1454,6 @@ public class ObjectStore implements RawStore, Configurable {
     return !doesExist;
   }
 
-
   @Override
   public boolean addPartitions(String dbName, String tblName,
                                PartitionSpecProxy partitionSpec, boolean ifNotExists)
@@ -1531,10 +1580,11 @@ public class ObjectStore implements RawStore, Configurable {
     return part;
   }
 
-  private MPartition getMPartition(String dbName, String tableName,
-      List<String> part_vals) throws MetaException {
+  private MPartition getMPartition(String dbName, String tableName, List<String> part_vals)
+      throws MetaException {
     MPartition mpart = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
@@ -1546,10 +1596,11 @@ public class ObjectStore implements RawStore, Configurable {
       }
       // Change the query to use part_vals instead of the name which is
       // redundant TODO: callers of this often get part_vals out of name for no reason...
-      String name = Warehouse.makePartName(convertToFieldSchemas(mtbl
-          .getPartitionKeys()), part_vals);
-      Query query = pm.newQuery(MPartition.class,
-          "table.tableName == t1 && table.database.name == t2 && partitionName == t3");
+      String name =
+          Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals);
+      query =
+          pm.newQuery(MPartition.class,
+              "table.tableName == t1 && table.database.name == t2 && partitionName == t3");
       query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
       query.setUnique(true);
       mpart = (MPartition) query.execute(tableName, dbName, name);
@@ -1559,6 +1610,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mpart;
   }
@@ -1750,8 +1804,13 @@ public class ObjectStore implements RawStore, Configurable {
       }
       @Override
       protected List<Partition> getJdoResult(
-          GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
-        return convertToParts(listMPartitions(dbName, tblName, maxParts));
+          GetHelper<List<Partition>> ctx) throws MetaException {
+        QueryWrapper queryWrapper = new QueryWrapper();
+        try {
+          return convertToParts(listMPartitions(dbName, tblName, maxParts, queryWrapper));
+        } finally {
+          queryWrapper.close();
+        }
       }
     }.run(false);
   }
@@ -1759,11 +1818,13 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
       short max, String userName, List<String> groupNames)
-      throws MetaException, NoSuchObjectException, InvalidObjectException {
+          throws MetaException, InvalidObjectException {
     boolean success = false;
+    QueryWrapper queryWrapper = new QueryWrapper();
+
     try {
       openTransaction();
-      List<MPartition> mparts = listMPartitions(dbName, tblName, max);
+      List<MPartition> mparts = listMPartitions(dbName, tblName, max, queryWrapper);
       List<Partition> parts = new ArrayList<Partition>(mparts.size());
       if (mparts != null && mparts.size()>0) {
         for (MPartition mpart : mparts) {
@@ -1786,6 +1847,7 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      queryWrapper.close();
     }
   }
 
@@ -1822,7 +1884,6 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
-
   private List<Partition> convertToParts(List<MPartition> mparts) throws MetaException {
     return convertToParts(mparts, null);
   }
@@ -1875,20 +1936,22 @@ public class ObjectStore implements RawStore, Configurable {
     List<String> pns = new ArrayList<String>();
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
-    Query q = pm.newQuery(
-        "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
-        + "where table.database.name == t1 && table.tableName == t2 "
-        + "order by partitionName asc");
-    q.declareParameters("java.lang.String t1, java.lang.String t2");
-    q.setResult("partitionName");
-
-    if(max > 0) {
-      q.setRange(0, max);
-    }
-    Collection names = (Collection) q.execute(dbName, tableName);
+    Query query =
+        pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+            + "where table.database.name == t1 && table.tableName == t2 "
+            + "order by partitionName asc");
+    query.declareParameters("java.lang.String t1, java.lang.String t2");
+    query.setResult("partitionName");
+    if (max > 0) {
+      query.setRange(0, max);
+    }
+    Collection names = (Collection) query.execute(dbName, tableName);
     for (Iterator i = names.iterator(); i.hasNext();) {
       pns.add((String) i.next());
     }
+    if (query != null) {
+      query.closeAll();
+    }
     return pns;
   }
 
@@ -1908,51 +1971,46 @@ public class ObjectStore implements RawStore, Configurable {
    *          has types of String, and if resultsCol is null, the types are MPartition.
    */
   private Collection getPartitionPsQueryResults(String dbName, String tableName,
-      List<String> part_vals, short max_parts, String resultsCol)
+      List<String> part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper)
       throws MetaException, NoSuchObjectException {
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     Table table = getTable(dbName, tableName);
-
     if (table == null) {
       throw new NoSuchObjectException(dbName + "." + tableName + " table not found");
     }
-
     List<FieldSchema> partCols = table.getPartitionKeys();
     int numPartKeys = partCols.size();
     if (part_vals.size() > numPartKeys) {
       throw new MetaException("Incorrect number of partition values");
     }
-
     partCols = partCols.subList(0, part_vals.size());
-    //Construct a pattern of the form: partKey=partVal/partKey2=partVal2/...
+    // Construct a pattern of the form: partKey=partVal/partKey2=partVal2/...
     // where partVal is either the escaped partition value given as input,
     // or a regex of the form ".*"
-    //This works because the "=" and "/" separating key names and partition key/values
+    // This works because the "=" and "/" separating key names and partition key/values
     // are not escaped.
     String partNameMatcher = Warehouse.makePartName(partCols, part_vals, ".*");
-    //add ".*" to the regex to match anything else afterwards the partial spec.
+    // add ".*" to the regex to match anything else afterwards the partial spec.
     if (part_vals.size() < numPartKeys) {
       partNameMatcher += ".*";
     }
-
-    Query q = pm.newQuery(MPartition.class);
+    Query query = queryWrapper.query = pm.newQuery(MPartition.class);
     StringBuilder queryFilter = new StringBuilder("table.database.name == dbName");
     queryFilter.append(" && table.tableName == tableName");
     queryFilter.append(" && partitionName.matches(partialRegex)");
-    q.setFilter(queryFilter.toString());
-    q.declareParameters("java.lang.String dbName, " +
-        "java.lang.String tableName, java.lang.String partialRegex");
-
-    if( max_parts >= 0 ) {
-      //User specified a row limit, set it on the Query
-      q.setRange(0, max_parts);
+    query.setFilter(queryFilter.toString());
+    query.declareParameters("java.lang.String dbName, "
+        + "java.lang.String tableName, java.lang.String partialRegex");
+    if (max_parts >= 0) {
+      // User specified a row limit, set it on the Query
+      query.setRange(0, max_parts);
     }
     if (resultsCol != null && !resultsCol.isEmpty()) {
-      q.setResult(resultsCol);
+      query.setResult(resultsCol);
     }
 
-    return (Collection) q.execute(dbName, tableName, partNameMatcher);
+    return (Collection) query.execute(dbName, tableName, partNameMatcher);
   }
 
   @Override
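
The matcher built in getPartitionPsQueryResults is worth seeing in isolation: a prefix of the partition values becomes a partition-name regex, with ".*" covering everything unspecified. A runnable simplification that skips the value escaping Warehouse.makePartName performs:

import java.util.List;

public class PartialPartSpecDemo {
    // Keys beyond the supplied values are matched by the trailing ".*".
    static String partNameMatcher(List<String> partKeys, List<String> partVals) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < partVals.size(); i++) {
            if (i > 0) sb.append('/');
            sb.append(partKeys.get(i)).append('=').append(partVals.get(i));
        }
        if (partVals.size() < partKeys.size()) {
            sb.append(".*"); // partial spec: accept any trailing key=value pairs
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        String m = partNameMatcher(List.of("ds", "hr"), List.of("2008-04-08"));
        System.out.println(m);                                // ds=2008-04-08.*
        System.out.println("ds=2008-04-08/hr=11".matches(m)); // true
        System.out.println("ds=2008-04-09/hr=11".matches(m)); // false
    }
}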
@@ -1961,11 +2019,13 @@ public class ObjectStore implements RawStore, Configurable {
       throws MetaException, InvalidObjectException, NoSuchObjectException {
     List<Partition> partitions = new ArrayList<Partition>();
     boolean success = false;
+    QueryWrapper queryWrapper = new QueryWrapper();
+
     try {
       openTransaction();
       LOG.debug("executing listPartitionNamesPsWithAuth");
       Collection parts = getPartitionPsQueryResults(db_name, tbl_name,
-          part_vals, max_parts, null);
+          part_vals, max_parts, null, queryWrapper);
       MTable mtbl = getMTable(db_name, tbl_name);
       for (Object o : parts) {
         Partition part = convertToPart((MPartition) o);
@@ -1985,6 +2045,7 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      queryWrapper.close();
     }
     return partitions;
   }
@@ -1994,11 +2055,13 @@ public class ObjectStore implements RawStore, Configurable {
       List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException {
     List<String> partitionNames = new ArrayList<String>();
     boolean success = false;
+    QueryWrapper queryWrapper = new QueryWrapper();
+
     try {
       openTransaction();
       LOG.debug("Executing listPartitionNamesPs");
       Collection names = getPartitionPsQueryResults(dbName, tableName,
-          part_vals, max_parts, "partitionName");
+          part_vals, max_parts, "partitionName", queryWrapper);
       for (Object o : names) {
         partitionNames.add((String) o);
       }
@@ -2007,14 +2070,13 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      queryWrapper.close();
     }
     return partitionNames;
   }
 
   // TODO:pc implement max
-  private List<MPartition> listMPartitions(String dbName, String tableName,
-      int max) {
-
+  private List<MPartition> listMPartitions(String dbName, String tableName, int max, QueryWrapper queryWrapper) {
     boolean success = false;
     List<MPartition> mparts = null;
     try {
@@ -2022,11 +2084,10 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Executing listMPartitions");
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       tableName = HiveStringUtils.normalizeIdentifier(tableName);
-      Query query = pm.newQuery(MPartition.class,
-          "table.tableName == t1 && table.database.name == t2");
+      Query query = queryWrapper.query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2");
       query.declareParameters("java.lang.String t1, java.lang.String t2");
       query.setOrdering("partitionName ascending");
-      if(max > 0) {
+      if (max > 0) {
         query.setRange(0, max);
       }
       mparts = (List<MPartition>) query.execute(tableName, dbName);
@@ -2216,8 +2277,8 @@ public class ObjectStore implements RawStore, Configurable {
   private List<Partition> getPartitionsViaOrmFilter(Table table, ExpressionTree tree,
       short maxParts, boolean isValidatedFilter) throws MetaException {
     Map<String, Object> params = new HashMap<String, Object>();
-    String jdoFilter = makeQueryFilterString(
-        table.getDbName(), table, tree, params, isValidatedFilter);
+    String jdoFilter =
+        makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter);
     if (jdoFilter == null) {
       assert !isValidatedFilter;
       return null;
@@ -2227,14 +2288,11 @@ public class ObjectStore implements RawStore, Configurable {
       // User specified a row limit, set it on the Query
       query.setRange(0, maxParts);
     }
-
     String parameterDeclaration = makeParameterDeclarationStringObj(params);
     query.declareParameters(parameterDeclaration);
     query.setOrdering("partitionName ascending");
-
     @SuppressWarnings("unchecked")
     List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
-
     LOG.debug("Done executing query for getPartitionsViaOrmFilter");
     pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names?
     LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter");
@@ -2243,10 +2301,6 @@ public class ObjectStore implements RawStore, Configurable {
     return results;
   }
 
-  private static class Out<T> {
-    public T val;
-  }
-
   /**
    * Gets partition names from the table via ORM (JDOQL) name filter.
    * @param dbName Database name.
@@ -2259,16 +2313,19 @@ public class ObjectStore implements RawStore, Configurable {
     if (partNames.isEmpty()) {
       return new ArrayList<Partition>();
     }
-    Out<Query> query = new Out<Query>();
-    List<MPartition> mparts = null;
-    try {
-      mparts = getMPartitionsViaOrmFilter(dbName, tblName, partNames, query);
-      return convertToParts(dbName, tblName, mparts);
-    } finally {
-      if (query.val != null) {
-        query.val.closeAll();
-      }
+    ObjectPair<Query, Map<String, String>> queryWithParams =
+        getPartQueryWithParams(dbName, tblName, partNames);
+    Query query = queryWithParams.getFirst();
+    query.setResultClass(MPartition.class);
+    query.setClass(MPartition.class);
+    query.setOrdering("partitionName ascending");
+    @SuppressWarnings("unchecked")
+    List<MPartition> mparts = (List<MPartition>)query.executeWithMap(queryWithParams.getSecond());
+    List<Partition> partitions = convertToParts(dbName, tblName, mparts);
+    if (query != null) {
+      query.closeAll();
     }
+    return partitions;
   }
 
   private void dropPartitionsNoTxn(String dbName, String tblName, List<String> partNames) {
@@ -2304,27 +2361,15 @@ public class ObjectStore implements RawStore, Configurable {
         sd.setCD(null);
       }
     }
+    if (query != null) {
+      query.closeAll();
+    }
     return candidateCds;
   }
 
-  private List<MPartition> getMPartitionsViaOrmFilter(String dbName,
-      String tblName, List<String> partNames, Out<Query> out) {
-    ObjectPair<Query, Map<String, String>> queryWithParams =
-        getPartQueryWithParams(dbName, tblName, partNames);
-    Query query = out.val = queryWithParams.getFirst();
-    query.setResultClass(MPartition.class);
-    query.setClass(MPartition.class);
-    query.setOrdering("partitionName ascending");
-
-    @SuppressWarnings("unchecked")
-    List<MPartition> result = (List<MPartition>)query.executeWithMap(queryWithParams.getSecond());
-    return result;
-  }
-
-  private ObjectPair<Query, Map<String, String>> getPartQueryWithParams(
-      String dbName, String tblName, List<String> partNames) {
-    StringBuilder sb = new StringBuilder(
-        "table.tableName == t1 && table.database.name == t2 && (");
+  private ObjectPair<Query, Map<String, String>> getPartQueryWithParams(String dbName,
+      String tblName, List<String> partNames) {
+    StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 && (");
     int n = 0;
     Map<String, String> params = new HashMap<String, String>();
     for (Iterator<String> itr = partNames.iterator(); itr.hasNext();) {
@@ -2337,16 +2382,13 @@ public class ObjectStore implements RawStore, Configurable {
     }
     sb.setLength(sb.length() - 4); // remove the last " || "
     sb.append(')');
-
     Query query = pm.newQuery();
     query.setFilter(sb.toString());
-
     LOG.debug(" JDOQL filter is " + sb.toString());
     params.put("t1", HiveStringUtils.normalizeIdentifier(tblName));
     params.put("t2", HiveStringUtils.normalizeIdentifier(dbName));
-
     query.declareParameters(makeParameterDeclarationString(params));
-    return new ObjectPair<Query, Map<String,String>>(query, params);
+    return new ObjectPair<Query, Map<String, String>>(query, params);
   }
 
   @Override
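
getPartQueryWithParams builds a single parameterized OR over every requested partition name; the setLength call trims the trailing " || " left by the loop. The same construction in a standalone, JDO-free form:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PartNameFilterDemo {
    public static void main(String[] args) {
        List<String> partNames = List.of("ds=2008-04-08/hr=11", "ds=2008-04-08/hr=12");
        StringBuilder sb =
            new StringBuilder("table.tableName == t1 && table.database.name == t2 && (");
        Map<String, String> params = new HashMap<>();
        int n = 0;
        for (String partName : partNames) {
            String param = "p" + n++;
            sb.append("partitionName == ").append(param).append(" || ");
            params.put(param, partName);
        }
        sb.setLength(sb.length() - 4); // remove the last " || "
        sb.append(')');
        System.out.println(sb);     // the JDOQL filter string
        System.out.println(params); // {p0=..., p1=...}
    }
}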
@@ -2668,6 +2710,7 @@ public class ObjectStore implements RawStore, Configurable {
   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
       throws MetaException {
     boolean success = false;
+    Query query = null;
     List<String> tableNames = new ArrayList<String>();
     try {
       openTransaction();
@@ -2675,7 +2718,7 @@ public class ObjectStore implements RawStore, Configurable {
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       Map<String, Object> params = new HashMap<String, Object>();
       String queryFilterString = makeQueryFilterString(dbName, null, filter, params);
-      Query query = pm.newQuery(MTable.class);
+      query = pm.newQuery(MTable.class);
       query.declareImports("import java.lang.String");
       query.setResult("tableName");
       query.setResultClass(java.lang.String.class);
@@ -2684,14 +2727,14 @@ public class ObjectStore implements RawStore, Configurable {
       }
       LOG.debug("filter specified is " + filter + "," + " JDOQL filter is " + queryFilterString);
       for (Entry<String, Object> entry : params.entrySet()) {
-        LOG.debug("key: " + entry.getKey() + " value: " + entry.getValue() +
-            " class: " + entry.getValue().getClass().getName());
+        LOG.debug("key: " + entry.getKey() + " value: " + entry.getValue() + " class: "
+            + entry.getValue().getClass().getName());
       }
       String parameterDeclaration = makeParameterDeclarationStringObj(params);
       query.declareParameters(parameterDeclaration);
       query.setFilter(queryFilterString);
-      Collection names = (Collection) query.executeWithMap(params);
-      //have to emulate "distinct", otherwise tables with the same name may be returned
+      Collection names = (Collection)query.executeWithMap(params);
+      // have to emulate "distinct", otherwise tables with the same name may be returned
       Set<String> tableNamesSet = new HashSet<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         tableNamesSet.add((String) i.next());
@@ -2700,58 +2743,54 @@ public class ObjectStore implements RawStore, Configurable {
       LOG.debug("Done executing query for listTableNamesByFilter");
       success = commitTransaction();
       LOG.debug("Done retrieving all objects for listTableNamesByFilter");
-
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return tableNames;
   }
 
   @Override
-  public List<String> listPartitionNamesByFilter(String dbName, String tableName,
-      String filter, short maxParts) throws MetaException {
+  public List<String> listPartitionNamesByFilter(String dbName, String tableName, String filter,
+      short maxParts) throws MetaException {
     boolean success = false;
+    Query query = null;
     List<String> partNames = new ArrayList<String>();
     try {
       openTransaction();
       LOG.debug("Executing listMPartitionNamesByFilter");
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       tableName = HiveStringUtils.normalizeIdentifier(tableName);
-
       MTable mtable = getMTable(dbName, tableName);
-      if( mtable == null ) {
+      if (mtable == null) {
         // To be consistent with the behavior of listPartitionNames, if the
         // table or db does not exist, we return an empty list
         return partNames;
       }
       Map<String, Object> params = new HashMap<String, Object>();
       String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params);
-      Query query = pm.newQuery(
-          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
-          + "where " + queryFilterString);
-
-      if( maxParts >= 0 ) {
-        //User specified a row limit, set it on the Query
+      query =
+          pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+              + "where " + queryFilterString);
+      if (maxParts >= 0) {
+        // User specified a row limit, set it on the Query
         query.setRange(0, maxParts);
       }
-
-      LOG.debug("Filter specified is " + filter + "," +
-          " JDOQL filter is " + queryFilterString);
+      LOG.debug("Filter specified is " + filter + "," + " JDOQL filter is " + queryFilterString);
       LOG.debug("Parms is " + params);
-
       String parameterDeclaration = makeParameterDeclarationStringObj(params);
       query.declareParameters(parameterDeclaration);
       query.setOrdering("partitionName ascending");
       query.setResult("partitionName");
-
       Collection names = (Collection) query.executeWithMap(params);
       partNames = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         partNames.add((String) i.next());
       }
-
       LOG.debug("Done executing query for listMPartitionNamesByFilter");
       success = commitTransaction();
       LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter");
@@ -2759,6 +2798,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return partNames;
   }
@@ -2962,10 +3004,12 @@ public class ObjectStore implements RawStore, Configurable {
     }
 
     boolean success = false;
+    QueryWrapper queryWrapper = new QueryWrapper();
+
     try {
       openTransaction();
       LOG.debug("execute removeUnusedColumnDescriptor");
-      List<MStorageDescriptor> referencedSDs = listStorageDescriptorsWithCD(oldCD, 1);
+      List<MStorageDescriptor> referencedSDs = listStorageDescriptorsWithCD(oldCD, 1, queryWrapper);
       //if no other SD references this CD, we can throw it out.
       if (referencedSDs != null && referencedSDs.isEmpty()) {
         pm.retrieve(oldCD);
@@ -2977,6 +3021,7 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      queryWrapper.close();
     }
   }
 
@@ -3005,21 +3050,22 @@ public class ObjectStore implements RawStore, Configurable {
    * @param maxSDs the maximum number of SDs to return
    * @return a list of storage descriptors
    */
-  private List<MStorageDescriptor> listStorageDescriptorsWithCD(MColumnDescriptor oldCD,
-      long maxSDs) {
+  private List<MStorageDescriptor> listStorageDescriptorsWithCD(
+      MColumnDescriptor oldCD,
+      long maxSDs,
+      QueryWrapper queryWrapper) {
     boolean success = false;
     List<MStorageDescriptor> sds = null;
     try {
       openTransaction();
       LOG.debug("Executing listStorageDescriptorsWithCD");
-      Query query = pm.newQuery(MStorageDescriptor.class,
-          "this.cd == inCD");
+      Query query = queryWrapper.query = pm.newQuery(MStorageDescriptor.class, "this.cd == inCD");
       query.declareParameters("MColumnDescriptor inCD");
-      if(maxSDs >= 0) {
-        //User specified a row limit, set it on the Query
+      if (maxSDs >= 0) {
+        // User specified a row limit, set it on the Query
         query.setRange(0, maxSDs);
       }
-      sds = (List<MStorageDescriptor>) query.execute(oldCD);
+      sds = (List<MStorageDescriptor>)query.execute(oldCD);
       LOG.debug("Done executing query for listStorageDescriptorsWithCD");
       pm.retrieveAll(sds);
       success = commitTransaction();
@@ -3096,9 +3142,11 @@ public class ObjectStore implements RawStore, Configurable {
     return success;
   }
 
-  private MIndex getMIndex(String dbName, String originalTblName, String indexName) throws MetaException {
+  private MIndex getMIndex(String dbName, String originalTblName, String indexName)
+      throws MetaException {
     MIndex midx = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
@@ -3108,19 +3156,23 @@ public class ObjectStore implements RawStore, Configurable {
         commited = commitTransaction();
         return null;
       }
-
-      Query query = pm.newQuery(MIndex.class,
-        "origTable.tableName == t1 && origTable.database.name == t2 && indexName == t3");
+      query =
+          pm.newQuery(MIndex.class,
+              "origTable.tableName == t1 && origTable.database.name == t2 && indexName == t3");
       query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
       query.setUnique(true);
-      midx = (MIndex) query.execute(originalTblName, dbName,
-          HiveStringUtils.normalizeIdentifier(indexName));
+      midx =
+          (MIndex) query.execute(originalTblName, dbName,
+              HiveStringUtils.normalizeIdentifier(indexName));
       pm.retrieve(midx);
       commited = commitTransaction();
     } finally {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return midx;
   }
@@ -3161,64 +3213,55 @@ public class ObjectStore implements RawStore, Configurable {
   public List<Index> getIndexes(String dbName, String origTableName, int max)
       throws MetaException {
     boolean success = false;
+    Query query = null;
     try {
+      LOG.debug("Executing getIndexes");
       openTransaction();
-      List<MIndex> mIndexList = listMIndexes(dbName, origTableName, max);
-      List<Index> indexes = new ArrayList<Index>(mIndexList.size());
-      for (MIndex midx : mIndexList) {
-        indexes.add(this.convertToIndex(midx));
-      }
-      success = commitTransaction();
-      return indexes;
-    } finally {
-      if (!success) {
-        rollbackTransaction();
-      }
-    }
-  }
 
-  private List<MIndex> listMIndexes(String dbName, String origTableName,
-      int max) {
-    boolean success = false;
-    List<MIndex> mindexes = null;
-    try {
-      openTransaction();
-      LOG.debug("Executing listMIndexes");
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       origTableName = HiveStringUtils.normalizeIdentifier(origTableName);
-      Query query = pm.newQuery(MIndex.class,
-          "origTable.tableName == t1 && origTable.database.name == t2");
+      query =
+          pm.newQuery(MIndex.class, "origTable.tableName == t1 && origTable.database.name == t2");
       query.declareParameters("java.lang.String t1, java.lang.String t2");
-      mindexes = (List<MIndex>) query.execute(origTableName, dbName);
-      LOG.debug("Done executing query for listMIndexes");
-      pm.retrieveAll(mindexes);
+      List<MIndex> mIndexes = (List<MIndex>) query.execute(origTableName, dbName);
+      pm.retrieveAll(mIndexes);
+
+      List<Index> indexes = new ArrayList<Index>(mIndexes.size());
+      for (MIndex mIdx : mIndexes) {
+        indexes.add(this.convertToIndex(mIdx));
+      }
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMIndexes");
+      LOG.debug("Done retrieving all objects for getIndexes");
+
+      return indexes;
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
-    return mindexes;
   }
 
   @Override
-  public List<String> listIndexNames(String dbName, String origTableName,
-      short max) throws MetaException {
+  public List<String> listIndexNames(String dbName, String origTableName, short max)
+      throws MetaException {
     List<String> pns = new ArrayList<String>();
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.debug("Executing listIndexNames");
       dbName = HiveStringUtils.normalizeIdentifier(dbName);
       origTableName = HiveStringUtils.normalizeIdentifier(origTableName);
-      Query q = pm.newQuery(
-          "select indexName from org.apache.hadoop.hive.metastore.model.MIndex "
-          + "where origTable.database.name == t1 && origTable.tableName == t2 "
-          + "order by indexName asc");
-      q.declareParameters("java.lang.String t1, java.lang.String t2");
-      q.setResult("indexName");
-      Collection names = (Collection) q.execute(dbName, origTableName);
+      query =
+          pm.newQuery("select indexName from org.apache.hadoop.hive.metastore.model.MIndex "
+              + "where origTable.database.name == t1 && origTable.tableName == t2 "
+              + "order by indexName asc");
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      query.setResult("indexName");
+      Collection names = (Collection) query.execute(dbName, origTableName);
       for (Iterator i = names.iterator(); i.hasNext();) {
         pns.add((String) i.next());
       }
@@ -3227,6 +3270,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return pns;
   }
@@ -3243,8 +3289,7 @@ public class ObjectStore implements RawStore, Configurable {
         throw new InvalidObjectException("Role " + roleName + " already exists.");
       }
       int now = (int)(System.currentTimeMillis()/1000);
-      MRole mRole = new MRole(roleName, now,
-          ownerName);
+      MRole mRole = new MRole(roleName, now, ownerName);
       pm.makePersistent(mRole);
       commited = commitTransaction();
       success = true;
@@ -3334,13 +3379,16 @@ public class ObjectStore implements RawStore, Configurable {
     return success;
   }
 
-  private MRoleMap getMSecurityUserRoleMap(String userName,
-      PrincipalType principalType, String roleName) {
+  private MRoleMap getMSecurityUserRoleMap(String userName, PrincipalType principalType,
+      String roleName) {
     MRoleMap mRoleMember = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
-      Query query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2 && role.roleName == t3");
+      query =
+          pm.newQuery(MRoleMap.class,
+              "principalName == t1 && principalType == t2 && role.roleName == t3");
       query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
       query.setUnique(true);
       mRoleMember = (MRoleMap) query.executeWithArray(userName, principalType.toString(), roleName);
@@ -3350,6 +3398,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mRoleMember;
   }
@@ -3358,6 +3409,7 @@ public class ObjectStore implements RawStore, Configurable {
   public boolean removeRole(String roleName) throws MetaException,
       NoSuchObjectException {
     boolean success = false;
+    QueryWrapper queryWrapper = new QueryWrapper();
     try {
       openTransaction();
       MRole mRol = getMRole(roleName);
@@ -3370,10 +3422,11 @@ public class ObjectStore implements RawStore, Configurable {
           pm.deletePersistentAll(roleMap);
         }
         List<MRoleMap> roleMember = listMSecurityPrincipalMembershipRole(mRol
-            .getRoleName(), PrincipalType.ROLE);
+            .getRoleName(), PrincipalType.ROLE, queryWrapper);
         if (roleMember.size() > 0) {
           pm.deletePersistentAll(roleMember);
         }
+        queryWrapper.close();
         // then remove all the grants
         List<MGlobalPrivilege> userGrants = listPrincipalGlobalGrants(
             mRol.getRoleName(), PrincipalType.ROLE);
@@ -3381,30 +3434,36 @@ public class ObjectStore implements RawStore, Configurable {
           pm.deletePersistentAll(userGrants);
         }
         List<MDBPrivilege> dbGrants = listPrincipalAllDBGrant(mRol
-            .getRoleName(), PrincipalType.ROLE);
+            .getRoleName(), PrincipalType.ROLE, queryWrapper);
         if (dbGrants.size() > 0) {
           pm.deletePersistentAll(dbGrants);
         }
+        queryWrapper.close();
         List<MTablePrivilege> tabPartGrants = listPrincipalAllTableGrants(
-            mRol.getRoleName(), PrincipalType.ROLE);
+            mRol.getRoleName(), PrincipalType.ROLE, queryWrapper);
         if (tabPartGrants.size() > 0) {
           pm.deletePersistentAll(tabPartGrants);
         }
+        queryWrapper.close();
         List<MPartitionPrivilege> partGrants = listPrincipalAllPartitionGrants(
-            mRol.getRoleName(), PrincipalType.ROLE);
+            mRol.getRoleName(), PrincipalType.ROLE, queryWrapper);
         if (partGrants.size() > 0) {
           pm.deletePersistentAll(partGrants);
         }
+        queryWrapper.close();
         List<MTableColumnPrivilege> tblColumnGrants = listPrincipalAllTableColumnGrants(
-            mRol.getRoleName(), PrincipalType.ROLE);
+            mRol.getRoleName(), PrincipalType.ROLE, queryWrapper);
         if (tblColumnGrants.size() > 0) {
           pm.deletePersistentAll(tblColumnGrants);
         }
+        queryWrapper.close();
         List<MPartitionColumnPrivilege> partColumnGrants = listPrincipalAllPartitionColumnGrants(
-            mRol.getRoleName(), PrincipalType.ROLE);
+            mRol.getRoleName(), PrincipalType.ROLE, queryWrapper);
         if (partColumnGrants.size() > 0) {
           pm.deletePersistentAll(partColumnGrants);
         }
+        queryWrapper.close();
+
         // finally remove the role
         pm.deletePersistent(mRol);
       }
@@ -3413,6 +3472,8 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+
+      queryWrapper.close();
     }
     return success;
   }
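
removeRole reuses one QueryWrapper across six grant lookups, calling close() after each batch is consumed and once more in finally; that is safe because close() nulls the query, making repeated calls harmless. A self-contained sketch of that lifecycle with a stand-in resource (lookup() is a hypothetical helper, not from the patch):

import java.util.List;

public class WrapperReuseDemo {
    // Stand-in for the patch's QueryWrapper: nulling on close makes
    // close() idempotent.
    static class Wrapper {
        AutoCloseable resource;
        void close() {
            if (resource != null) {
                try { resource.close(); } catch (Exception ignored) { }
                resource = null;
            }
        }
    }

    // Hypothetical lookup that parks its resource in the shared wrapper,
    // the way listDatabaseGrants and friends park their JDO Query.
    static List<String> lookup(Wrapper w, String what) {
        w.resource = () -> System.out.println("closed query for " + what);
        return List.of(what + "-grant");
    }

    public static void main(String[] args) {
        Wrapper w = new Wrapper();
        try {
            System.out.println(lookup(w, "db"));
            w.close(); // release before the wrapper is reused
            System.out.println(lookup(w, "table"));
            w.close();
        } finally {
            w.close(); // idempotent: no-op when already closed
        }
    }
}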
@@ -3461,66 +3522,62 @@ public class ObjectStore implements RawStore, Configurable {
 
   @SuppressWarnings("unchecked")
   @Override
-  public List<MRoleMap> listRoles(String principalName,
-      PrincipalType principalType) {
+  public List<MRoleMap> listRoles(String principalName, PrincipalType principalType) {
     boolean success = false;
-    List<MRoleMap> mRoleMember = null;
+    Query query = null;
+    List<MRoleMap> mRoleMember = new ArrayList<MRoleMap>();
+
     try {
-      openTransaction();
       LOG.debug("Executing listRoles");
-      Query query = pm
-          .newQuery(
-              MRoleMap.class,
-              "principalName == t1 && principalType == t2");
-      query
-          .declareParameters("java.lang.String t1, java.lang.String t2");
+
+      openTransaction();
+      query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2");
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
       query.setUnique(false);
-      mRoleMember = (List<MRoleMap>) query.executeWithArray(
-          principalName, principalType.toString());
-      LOG.debug("Done executing query for listMSecurityUserRoleMap");
-      pm.retrieveAll(mRoleMember);
+      List<MRoleMap> mRoles =
+          (List<MRoleMap>) query.executeWithArray(principalName, principalType.toString());
+      pm.retrieveAll(mRoles);
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMSecurityUserRoleMap");
+
+      mRoleMember.addAll(mRoles);
+
+      LOG.debug("Done retrieving all objects for listRoles");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
 
     if (principalType == PrincipalType.USER) {
       // All users belong to public role implicitly, add that role
-      if (mRoleMember == null) {
-        mRoleMember = new ArrayList<MRoleMap>();
-      } else {
-        mRoleMember = new ArrayList<MRoleMap>(mRoleMember);
-      }
       MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, HiveMetaStore.PUBLIC);
-      mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0,
-          null, null, false));
+      mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0, null,
+          null, false));
     }
-    return mRoleMember;
 
+    return mRoleMember;
   }
 
   @SuppressWarnings("unchecked")
   private List<MRoleMap> listMSecurityPrincipalMembershipRole(final String roleName,
-      final PrincipalType principalType) {
+      final PrincipalType principalType,
+      QueryWrapper queryWrapper) {
     boolean success = false;
     List<MRoleMap> mRoleMemebership = null;
     try {
-      openTransaction();
       LOG.debug("Executing listMSecurityPrincipalMembershipRole");
-      Query query = pm.newQuery(MRoleMap.class,
-          "principalName == t1 && principalType == t2");
-      query
-          .declareParameters("java.lang.String t1, java.lang.String t2");
+
+      openTransaction();
+      Query query = queryWrapper.query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2");
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
       mRoleMemebership = (List<MRoleMap>) query.execute(roleName, principalType.toString());
-      LOG
-          .debug("Done executing query for listMSecurityPrincipalMembershipRole");
       pm.retrieveAll(mRoleMemebership);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole");
+
+      LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole");
     } finally {
       if (!success) {
         rollbackTransaction();
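
The new QueryWrapper parameter shifts query ownership to the caller: the helper parks its JDO query in the wrapper and returns results still backed by it, so the caller must close the wrapper once it is done consuming them, exactly as listPrincipalDBGrantsAll does further down in this diff. A caller-side sketch under that assumption (a fragment from inside ObjectStore; error handling elided):

// Sketch of the caller side of the contract; assumes the helper assigns
// queryWrapper.query internally, as in the hunk above.
QueryWrapper queryWrapper = new QueryWrapper();
try {
  List<MRoleMap> memberships =
      listMSecurityPrincipalMembershipRole(roleName, PrincipalType.ROLE, queryWrapper);
  // ... consume 'memberships' while the backing query is still open ...
} finally {
  queryWrapper.close(); // closes the JDO query the helper left behind
}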
@@ -3543,9 +3600,10 @@ public class ObjectStore implements RawStore, Configurable {
   private MRole getMRole(String roleName) {
     MRole mrole = null;
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
-      Query query = pm.newQuery(MRole.class, "roleName == t1");
+      query = pm.newQuery(MRole.class, "roleName == t1");
       query.declareParameters("java.lang.String t1");
       query.setUnique(true);
       mrole = (MRole) query.execute(roleName);
@@ -3555,6 +3613,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mrole;
   }
@@ -3562,13 +3623,14 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public List<String> listRoleNames() {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.debug("Executing listAllRoleNames");
-      Query query = pm.newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole");
+      query = pm.newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole");
       query.setResult("roleName");
       Collection names = (Collection) query.execute();
-      List<String> roleNames  = new ArrayList<String>();
+      List<String> roleNames = new ArrayList<String>();
       for (Iterator i = names.iterator(); i.hasNext();) {
         roleNames.add((String) i.next());
       }
@@ -3578,6 +3640,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -4386,49 +4451,61 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public List<MRoleMap> listRoleMembers(String roleName) {
     boolean success = false;
-    List<MRoleMap> mRoleMemeberList = null;
+    Query query = null;
+    List<MRoleMap> mRoleMemeberList = new ArrayList<MRoleMap>();
     try {
+      LOG.debug("Executing listRoleMembers");
+
       openTransaction();
-      LOG.debug("Executing listMSecurityUserRoleMember");
-      Query query = pm.newQuery(MRoleMap.class,
-          "role.roleName == t1");
+      query = pm.newQuery(MRoleMap.class, "role.roleName == t1");
       query.declareParameters("java.lang.String t1");
       query.setUnique(false);
-      mRoleMemeberList = (List<MRoleMap>) query.execute(
-          roleName);
-      LOG.debug("Done executing query for listMSecurityUserRoleMember");
-      pm.retrieveAll(mRoleMemeberList);
+      List<MRoleMap> mRoles = (List<MRoleMap>) query.execute(roleName);
+      pm.retrieveAll(mRoles);
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMSecurityUserRoleMember");
+
+      mRoleMemeberList.addAll(mRoles);
+
+      LOG.debug("Done retrieving all objects for listRoleMembers");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mRoleMemeberList;
   }
 
   @SuppressWarnings("unchecked")
   @Override
-  public List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName, PrincipalType principalType) {
+  public List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName,
+      PrincipalType principalType) {
     boolean commited = false;
-    List<MGlobalPrivilege> userNameDbPriv = null;
+    Query query = null;
+    List<MGlobalPrivilege> userNameDbPriv = new ArrayList<MGlobalPrivilege>();
     try {
+      List<MGlobalPrivilege> mPrivs = null;
       openTransaction();
       if (principalName != null) {
-        Query query = pm.newQuery(MGlobalPrivilege.class,
-            "principalName == t1 && principalType == t2 ");
-        query.declareParameters(
-            "java.lang.String t1, java.lang.String t2");
-        userNameDbPriv = (List<MGlobalPrivilege>) query
-            .executeWithArray(principalName, principalType.toString());
-        pm.retrieveAll(userNameDbPriv);
+        query = pm.newQuery(MGlobalPrivilege.class, "principalName == t1 && principalType == t2 ");
+        query.declareParameters("java.lang.String t1, java.lang.String t2");
+        mPrivs = (List<MGlobalPrivilege>) query
+                .executeWithArray(principalName, principalType.toString());
+        pm.retrieveAll(mPrivs);
       }
       commited = commitTransaction();
+      if (mPrivs != null) {
+        userNameDbPriv.addAll(mPrivs);
+      }
     } finally {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return userNameDbPriv;
   }
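
A recurring shape in this diff is worth calling out: the collections returned by query.execute() are backed by the JDO query, so the rewritten methods copy them into a plain ArrayList (after pm.retrieveAll forces the fields to load) before the finally block calls query.closeAll(). Presumably this copy is what makes the eager close safe. A condensed sketch of the idiom, with transaction handling elided ('pm' is the PersistenceManager of the surrounding class):

// Condensed fragment illustrating the copy-before-close idiom.
@SuppressWarnings("unchecked")
private List<MGlobalPrivilege> listGrantsSafely(String principalName, String principalType) {
  Query query = null;
  List<MGlobalPrivilege> result = new ArrayList<MGlobalPrivilege>();
  try {
    query = pm.newQuery(MGlobalPrivilege.class, "principalName == t1 && principalType == t2");
    query.declareParameters("java.lang.String t1, java.lang.String t2");
    List<MGlobalPrivilege> mPrivs =
        (List<MGlobalPrivilege>) query.executeWithArray(principalName, principalType);
    pm.retrieveAll(mPrivs);  // force-load fields while the query is open
    result.addAll(mPrivs);   // detach into a plain ArrayList
  } finally {
    if (query != null) {
      query.closeAll();      // safe: 'result' no longer depends on the query
    }
  }
  return result;
}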
@@ -4436,9 +4513,10 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
     boolean commited = false;
+    Query query = null;
     try {
       openTransaction();
-      Query query = pm.newQuery(MGlobalPrivilege.class);
+      query = pm.newQuery(MGlobalPrivilege.class);
       List<MGlobalPrivilege> userNameDbPriv = (List<MGlobalPrivilege>) query.execute();
       pm.retrieveAll(userNameDbPriv);
       commited = commitTransaction();
@@ -4447,6 +4525,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!commited) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -4470,25 +4551,32 @@ public class ObjectStore implements RawStore, Configurable {
   public List<MDBPrivilege> listPrincipalDBGrants(String principalName,
       PrincipalType principalType, String dbName) {
     boolean success = false;
-    List<MDBPrivilege> mSecurityDBList = null;
+    Query query = null;
+    List<MDBPrivilege> mSecurityDBList = new ArrayList<MDBPrivilege>();
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-
     try {
-      openTransaction();
       LOG.debug("Executing listPrincipalDBGrants");
-        Query query = pm.newQuery(MDBPrivilege.class,
-            "principalName == t1 && principalType == t2 && database.name == t3");
-        query
-            .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
-        mSecurityDBList = (List<MDBPrivilege>) query.executeWithArray(principalName, principalType.toString(), dbName);
-      LOG.debug("Done executing query for listPrincipalDBGrants");
-      pm.retrieveAll(mSecurityDBList);
+
+      openTransaction();
+      query =
+          pm.newQuery(MDBPrivilege.class,
+              "principalName == t1 && principalType == t2 && database.name == t3");
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
+      List<MDBPrivilege> mPrivs =
+          (List<MDBPrivilege>) query.executeWithArray(principalName, principalType.toString(),
+              dbName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
+
+      mSecurityDBList.addAll(mPrivs);
       LOG.debug("Done retrieving all objects for listPrincipalDBGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityDBList;
   }
@@ -4496,12 +4584,22 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
       String principalName, PrincipalType principalType) {
-    return convertDB(listPrincipalAllDBGrant(principalName, principalType));
+    QueryWrapper queryWrapper = new QueryWrapper();
+    try {
+      return convertDB(listPrincipalAllDBGrant(principalName, principalType, queryWrapper));
+    } finally {
+      queryWrapper.close();
+    }
   }
 
   @Override
   public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
-    return convertDB(listDatabaseGrants(dbName));
+    QueryWrapper queryWrapper = new QueryWrapper();
+    try {
+      return convertDB(listDatabaseGrants(dbName, queryWrapper));
+    } finally {
+      queryWrapper.close();
+    }
   }
 
   private List<HiveObjectPrivilege> convertDB(List<MDBPrivilege> privs) {
@@ -4522,26 +4620,28 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @SuppressWarnings("unchecked")
-  private List<MDBPrivilege> listPrincipalAllDBGrant(
-      String principalName, PrincipalType principalType) {
+  private List<MDBPrivilege> listPrincipalAllDBGrant(String principalName,
+      PrincipalType principalType,
+      QueryWrapper queryWrapper) {
     boolean success = false;
+    Query query = null;
     List<MDBPrivilege> mSecurityDBList = null;
     try {
-      openTransaction();
       LOG.debug("Executing listPrincipalAllDBGrant");
+
+      openTransaction();
       if (principalName != null && principalType != null) {
-        Query query = pm.newQuery(MDBPrivilege.class,
-            "principalName == t1 && principalType == t2");
-        query
-            .declareParameters("java.lang.String t1, java.lang.String t2");
-        mSecurityDBList = (List<MDBPrivilege>) query.execute(principalName, principalType.toString());
+        query = queryWrapper.query = pm.newQuery(MDBPrivilege.class, "principalName == t1 && principalType == t2");
+        query.declareParameters("java.lang.String t1, java.lang.String t2");
+        mSecurityDBList =
+            (List<MDBPrivilege>) query.execute(principalName, principalType.toString());
       } else {
-        Query query = pm.newQuery(MDBPrivilege.class);
+        query = queryWrapper.query = pm.newQuery(MDBPrivilege.class);
         mSecurityDBList = (List<MDBPrivilege>) query.execute();
       }
-      LOG.debug("Done executing query for listPrincipalAllDBGrant");
       pm.retrieveAll(mSecurityDBList);
       success = commitTransaction();
+
       LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant");
     } finally {
       if (!success) {
@@ -4552,91 +4652,101 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @SuppressWarnings("unchecked")
-  public List<MTablePrivilege> listAllTableGrants(String dbName,
-      String tableName) {
+  public List<MTablePrivilege> listAllTableGrants(String dbName, String tableName) {
     boolean success = false;
+    Query query = null;
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-    List<MTablePrivilege> mSecurityTabList = null;
+    List<MTablePrivilege> mSecurityTabList = new ArrayList<MTablePrivilege>();
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     try {
-      openTransaction();
       LOG.debug("Executing listAllTableGrants");
+
+      openTransaction();
       String queryStr = "table.tableName == t1 && table.database.name == t2";
-      Query query = pm.newQuery(
-          MTablePrivilege.class, queryStr);
-      query.declareParameters(
-          "java.lang.String t1, java.lang.String t2");
-      mSecurityTabList = (List<MTablePrivilege>) query
-          .executeWithArray(tableName, dbName);
+      query = pm.newQuery(MTablePrivilege.class, queryStr);
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      List<MTablePrivilege> mPrivs = (List<MTablePrivilege>) query.executeWithArray(tableName, dbName);
       LOG.debug("Done executing query for listAllTableGrants");
-      pm.retrieveAll(mSecurityTabList);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listAllTableGrants");
+
+      mSecurityTabList.addAll(mPrivs);
+
+      LOG.debug("Done retrieving all objects for listAllTableGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabList;
   }
 
   @SuppressWarnings("unchecked")
-  public List<MPartitionPrivilege> listTableAllPartitionGrants(String dbName,
-      String tableName) {
+  public List<MPartitionPrivilege> listTableAllPartitionGrants(String dbName, String tableName) {
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     boolean success = false;
-    List<MPartitionPrivilege> mSecurityTabPartList = null;
+    Query query = null;
+    List<MPartitionPrivilege> mSecurityTabPartList = new ArrayList<MPartitionPrivilege>();
     try {
-      openTransaction();
       LOG.debug("Executing listTableAllPartitionGrants");
+
+      openTransaction();
       String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2";
-      Query query = pm.newQuery(
-          MPartitionPrivilege.class, queryStr);
-      query.declareParameters(
-          "java.lang.String t1, java.lang.String t2");
-      mSecurityTabPartList = (List<MPartitionPrivilege>) query
-          .executeWithArray(tableName, dbName);
-      LOG.debug("Done executing query for listTableAllPartitionGrants");
-      pm.retrieveAll(mSecurityTabPartList);
+      query = pm.newQuery(MPartitionPrivilege.class, queryStr);
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      List<MPartitionPrivilege> mPrivs = (List<MPartitionPrivilege>) query.executeWithArray(tableName, dbName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listTableAllPartitionGrants");
+
+      mSecurityTabPartList.addAll(mPrivs);
+
+      LOG.debug("Done retrieving all objects for listTableAllPartitionGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabPartList;
   }
 
   @SuppressWarnings("unchecked")
-  public List<MTableColumnPrivilege> listTableAllColumnGrants(String dbName,
-      String tableName) {
+  public List<MTableColumnPrivilege> listTableAllColumnGrants(String dbName, String tableName) {
     boolean success = false;
-    List<MTableColumnPrivilege> mTblColPrivilegeList = null;
+    Query query = null;
+    List<MTableColumnPrivilege> mTblColPrivilegeList = new ArrayList<MTableColumnPrivilege>();
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-
     try {
-      openTransaction();
       LOG.debug("Executing listTableAllColumnGrants");
+
+      openTransaction();
       String queryStr = "table.tableName == t1 && table.database.name == t2";
-      Query query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
+      query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
       query.declareParameters("java.lang.String t1, java.lang.String t2");
-      mTblColPrivilegeList = (List<MTableColumnPrivilege>) query
-          .executeWithArray(tableName, dbName);
-      LOG.debug("Done executing query for listTableAllColumnGrants");
-      pm.retrieveAll(mTblColPrivilegeList);
+      List<MTableColumnPrivilege> mPrivs =
+          (List<MTableColumnPrivilege>) query.executeWithArray(tableName, dbName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
+
+      mTblColPrivilegeList.addAll(mPrivs);
+
       LOG.debug("Done retrieving all objects for listTableAllColumnGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mTblColPrivilegeList;
   }
@@ -4645,26 +4755,32 @@ public class ObjectStore implements RawStore, Configurable {
   public List<MPartitionColumnPrivilege> listTableAllPartitionColumnGrants(String dbName,
       String tableName) {
     boolean success = false;
+    Query query = null;
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-
-    List<MPartitionColumnPrivilege> mSecurityColList = null;
+    List<MPartitionColumnPrivilege> mSecurityColList = new ArrayList<MPartitionColumnPrivilege>();
     try {
-      openTransaction();
       LOG.debug("Executing listTableAllPartitionColumnGrants");
+
+      openTransaction();
       String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2";
-      Query query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr);
+      query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr);
       query.declareParameters("java.lang.String t1, java.lang.String t2");
-      mSecurityColList = (List<MPartitionColumnPrivilege>) query
-          .executeWithArray(tableName, dbName);
-      LOG.debug("Done executing query for listTableAllPartitionColumnGrants");
-      pm.retrieveAll(mSecurityColList);
+      List<MPartitionColumnPrivilege> mPrivs =
+          (List<MPartitionColumnPrivilege>) query.executeWithArray(tableName, dbName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
+
+      mSecurityColList.addAll(mPrivs);
+
       LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityColList;
   }
@@ -4704,19 +4820,17 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @SuppressWarnings("unchecked")
-  private List<MDBPrivilege> listDatabaseGrants(String dbName) {
+  private List<MDBPrivilege> listDatabaseGrants(String dbName, QueryWrapper queryWrapper) {
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-
     boolean success = false;
+
     try {
-      openTransaction();
       LOG.debug("Executing listDatabaseGrants");
-      Query query = pm.newQuery(MDBPrivilege.class,
-          "database.name == t1");
+
+      openTransaction();
+      Query query = queryWrapper.query = pm.newQuery(MDBPrivilege.class, "database.name == t1");
       query.declareParameters("java.lang.String t1");
-      List<MDBPrivilege> mSecurityDBList = (List<MDBPrivilege>) query
-          .executeWithArray(dbName);
-      LOG.debug("Done executing query for listDatabaseGrants");
+      List<MDBPrivilege> mSecurityDBList = (List<MDBPrivilege>) query.executeWithArray(dbName);
       pm.retrieveAll(mSecurityDBList);
       success = commitTransaction();
       LOG.debug("Done retrieving all objects for listDatabaseGrants");
@@ -4792,162 +4906,181 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   @SuppressWarnings("unchecked")
-  public List<MTablePrivilege> listAllTableGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName) {
+  public List<MTablePrivilege> listAllTableGrants(String principalName,
+      PrincipalType principalType, String dbName, String tableName) {
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-
     boolean success = false;
-    List<MTablePrivilege> mSecurityTabPartList = null;
+    Query query = null;
+    List<MTablePrivilege> mSecurityTabPartList = new ArrayList<MTablePrivilege>();
     try {
       openTransaction();
       LOG.debug("Executing listAllTableGrants");
-      Query query = pm.newQuery(
-          MTablePrivilege.class,
+      query =
+          pm.newQuery(MTablePrivilege.class,
               "principalName == t1 && principalType == t2 && table.tableName == t3 && table.database.name == t4");
-      query.declareParameters(
-          "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4");
-      mSecurityTabPartList = (List<MTablePrivilege>) query
-          .executeWithArray(principalName, principalType.toString(), tableName, dbName);
-      LOG.debug("Done executing query for listAllTableGrants");
-      pm.retrieveAll(mSecurityTabPartList);
+      query
+          .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4");
+      List<MTablePrivilege> mPrivs =
+          (List<MTablePrivilege>) query.executeWithArray(principalName, principalType.toString(),
+              tableName, dbName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listAllTableGrants");
+
+      mSecurityTabPartList.addAll(mPrivs);
+
+      LOG.debug("Done retrieving all objects for listAllTableGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabPartList;
   }
 
   @SuppressWarnings("unchecked")
   @Override
-  public List<MPartitionPrivilege> listPrincipalPartitionGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName, String partName) {
+  public List<MPartitionPrivilege> listPrincipalPartitionGrants(String principalName,
+      PrincipalType principalType, String dbName, String tableName, String partName) {
     boolean success = false;
+    Query query = null;
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
-
-    List<MPartitionPrivilege> mSecurityTabPartList = null;
+    List<MPartitionPrivilege> mSecurityTabPartList = new ArrayList<MPartitionPrivilege>();
     try {
-      openTransaction();
-      LOG.debug("Executing listMSecurityPrincipalPartitionGrant");
-      Query query = pm.newQuery(
-          MPartitionPrivilege.class,
-              "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " +
-              "&& partition.table.database.name == t4 && partition.partitionName == t5");
-      query.declareParameters(
-          "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " +
-          "java.lang.String t5");
-      mSecurityTabPartList = (List<MPartitionPrivilege>) query
-          .executeWithArray(principalName, principalType.toString(), tableName, dbName, partName);
-      LOG.debug("Done executing query for listMSecurityPrincipalPartitionGrant");
+      LOG.debug("Executing listPrincipalPartitionGrants");
 
-      pm.retrieveAll(mSecurityTabPartList);
+      openTransaction();
+      query =
+          pm.newQuery(MPartitionPrivilege.class,
+              "principalName == t1 && principalType == t2 && partition.table.tableName == t3 "
+                  + "&& partition.table.database.name == t4 && partition.partitionName == t5");
+      query
+          .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, "
+              + "java.lang.String t5");
+      List<MPartitionPrivilege> mPrivs =
+          (List<MPartitionPrivilege>) query.executeWithArray(principalName,
+              principalType.toString(), tableName, dbName, partName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionGrant");
+
+      mSecurityTabPartList.addAll(mPrivs);
+
+      LOG.debug("Done retrieving all objects for listPrincipalPartitionGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityTabPartList;
   }
 
   @SuppressWarnings("unchecked")
   @Override
-  public List<MTableColumnPrivilege> listPrincipalTableColumnGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName, String columnName) {
+  public List<MTableColumnPrivilege> listPrincipalTableColumnGrants(String principalName,
+      PrincipalType principalType, String dbName, String tableName, String columnName) {
     boolean success = false;
+    Query query = null;
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     columnName = HiveStringUtils.normalizeIdentifier(columnName);
-    List<MTableColumnPrivilege> mSecurityColList = null;
+    List<MTableColumnPrivilege> mSecurityColList = new ArrayList<MTableColumnPrivilege>();
     try {
-      openTransaction();
       LOG.debug("Executing listPrincipalTableColumnGrants");
-      String queryStr = "principalName == t1 && principalType == t2 && " +
-          "table.tableName == t3 && table.database.name == t4 &&  columnName == t5 ";
-      Query query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
-      query
-          .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " +
-              "java.lang.String t4, java.lang.String t5");
-      mSecurityColList = (List<MTableColumnPrivilege>) query.executeWithArray(
-          principalName, principalType.toString(), tableName, dbName, columnName);
-      LOG.debug("Done executing query for listPrincipalTableColumnGrants");
-      pm.retrieveAll(mSecurityColList);
+
+      openTransaction();
+      String queryStr =
+          "principalName == t1 && principalType == t2 && "
+              + "table.tableName == t3 && table.database.name == t4 &&  columnName == t5 ";
+      query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, "
+          + "java.lang.String t4, java.lang.String t5");
+      List<MTableColumnPrivilege> mPrivs =
+          (List<MTableColumnPrivilege>) query.executeWithArray(principalName,
+              principalType.toString(), tableName, dbName, columnName);
+      pm.retrieveAll(mPrivs);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listPrincipalTableColumnGrants");
+
+      mSecurityColList.addAll(mPrivs);
+
+      LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityColList;
   }
 
   @Override
   @SuppressWarnings("unchecked")
-  public List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants(
-      String principalName, PrincipalType principalType, String dbName,
-      String tableName, String partitionName, String columnName) {
+  public List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+      PrincipalType principalType, String dbName, String tableName, String partitionName,
+      String columnName) {
     boolean success = false;
+    Query query = null;
     tableName = HiveStringUtils.normalizeIdentifier(tableName);
     dbName = HiveStringUtils.normalizeIdentifier(dbName);
     columnName = HiveStringUtils.normalizeIdentifier(columnName);
-
-    List<MPartitionColumnPrivilege> mSecurityColList = null;
+    List<MPartitionColumnPrivilege> mSecurityColList = new ArrayList<MPartitionColumnPrivilege>();
     try {
-      openTransaction();
       LOG.debug("Executing listPrincipalPartitionColumnGrants");
-      Query query = pm
-          .newQuery(
+
+      openTransaction();
+      query = pm.newQuery(
               MPartitionColumnPrivilege.class,
-              "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " +
-              "&& partition.table.database.name == t4 && partition.partitionName == t5 && columnName == t6");
-      query
-          .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " +
-              "java.lang.String t4, java.lang.String t5, java.lang.String t6");
+              "principalName == t1 && principalType == t2 && partition.table.tableName == t3 "
+                  + "&& partition.table.database.name == t4 && partition.partitionName == t5 && columnName == t6");
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, "
+          + "java.lang.String t4, java.lang.String t5, java.lang.String t6");
+      List<MPartitionColumnPrivilege> mPrivs =
+          (List<MPartitionColumnPrivilege>) query.executeWithArray(principalName,
+              principalType.toString(), tableName, dbName, partitionName, columnName);
+      pm.retrieveAll(mPrivs);
+      success = commitTransaction();
 
-      mSecurityColList = (List<MPartitionColumnPrivilege>) query
-          .executeWithArray(principalName, principalType.toString(), tableName,
-              dbName, partitionName, columnName);
-      LOG.debug("Done executing query for listPrincipalPartitionColumnGrants");
-      pm.retrieveAll(mSecurityColList);
+      mSecurityColList.addAll(mPrivs);
 
-      success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listPrincipalPartitionColumnGrants");
+      LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
     return mSecurityColList;
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
-      String principalName, PrincipalType principalType) {
+  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(String principalName,
+      PrincipalType principalType) {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.debug("Executing listPrincipalPartitionColumnGrantsAll");
       List<MPartitionColumnPrivilege> mSecurityTabPartList;
       if (principalName != null && principalType != null) {
-        Query query = pm.newQuery(MPartitionColumnPrivilege.class,
-            "principalName == t1 && principalType == t2");
+        query =
+            pm.newQuery(MPartitionColumnPrivilege.class,
+                "principalName == t1 && principalType == t2");
         query.declareParameters("java.lang.String t1, java.lang.String t2");
-        mSecurityTabPartList = (List<MPartitionColumnPrivilege>)
-            query.executeWithArray(principalName, principalType.toString());
+        mSecurityTabPartList =
+            (List<MPartitionColumnPrivilege>) query.executeWithArray(principalName,
+                principalType.toString());
       } else {
-        Query query = pm.newQuery(MPartitionColumnPrivilege.class);
+        query = pm.newQuery(MPartitionColumnPrivilege.class);
         mSecurityTabPartList = (List<MPartitionColumnPrivilege>) query.execute();
       }
       LOG.debug("Done executing query for listPrincipalPartitionColumnGrantsAll");
@@ -4960,23 +5093,29 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(
-      String dbName, String tableName, String partitionName, String columnName) {
+  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
+      String partitionName, String columnName) {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.debug("Executing listPartitionColumnGrantsAll");
-      Query query = pm.newQuery(MPartitionColumnPrivilege.class,
-          "partition.table.tableName == t3 && partition.table.database.name == t4 && " +
-          "partition.partitionName == t5 && columnName == t6");
-      query.declareParameters(
-          "java.lang.String t3, java.lang.String t4, java.lang.String t5, java.lang.String t6");
-      List<MPartitionColumnPrivilege> mSecurityTabPartList = (List<MPartitionColumnPrivilege>)
-          query.executeWithArray(tableName, dbName, partitionName, columnName);
+      query =
+          pm.newQuery(MPartitionColumnPrivilege.class,
+              "partition.table.tableName == t3 && partition.table.database.name == t4 && "
+                  + "partition.partitionName == t5 && columnName == t6");
+      query
+          .declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5, java.lang.String t6");
+      List<MPartitionColumnPrivilege> mSecurityTabPartList =
+          (List<MPartitionColumnPrivilege>) query.executeWithArray(tableName, dbName,
+              partitionName, columnName);
       LOG.debug("Done executing query for listPartitionColumnGrantsAll");
       pm.retrieveAll(mSecurityTabPartList);
       List<HiveObjectPrivilege> result = convertPartCols(mSecurityTabPartList);
@@ -4987,6 +5126,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5012,23 +5154,22 @@ public class ObjectStore implements RawStore, Configurable {
 
   @SuppressWarnings("unchecked")
   private List<MTablePrivilege> listPrincipalAllTableGrants(
-      String principalName, PrincipalType principalType) {
+      String principalName, PrincipalType principalType, QueryWrapper queryWrapper) {
     boolean success = false;
     List<MTablePrivilege> mSecurityTabPartList = null;
     try {
-      openTransaction();
       LOG.debug("Executing listPrincipalAllTableGrants");
-      Query query = pm.newQuery(MTablePrivilege.class,
+
+      openTransaction();
+      Query query = queryWrapper.query = pm.newQuery(MTablePrivilege.class,
           "principalName == t1 && principalType == t2");
       query.declareParameters("java.lang.String t1, java.lang.String t2");
       mSecurityTabPartList = (List<MTablePrivilege>) query.execute(
           principalName, principalType.toString());
-      LOG
-          .debug("Done executing query for listPrincipalAllTableGrants");
       pm.retrieveAll(mSecurityTabPartList);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listPrincipalAllTableGrants");
+
+      LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
@@ -5038,21 +5179,21 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
-      String principalName, PrincipalType principalType) {
+  public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(String principalName,
+      PrincipalType principalType) {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.debug("Executing listPrincipalAllTableGrants");
       List<MTablePrivilege> mSecurityTabPartList;
       if (principalName != null && principalType != null) {
-        Query query = pm.newQuery(MTablePrivilege.class,
-            "principalName == t1 && principalType == t2");
+        query = pm.newQuery(MTablePrivilege.class, "principalName == t1 && principalType == t2");
         query.declareParameters("java.lang.String t1, java.lang.String t2");
-        mSecurityTabPartList = (List<MTablePrivilege>) query.execute(
-            principalName, principalType.toString());
+        mSecurityTabPartList =
+            (List<MTablePrivilege>) query.execute(principalName, principalType.toString());
       } else {
-        Query query = pm.newQuery(MTablePrivilege.class);
+        query = pm.newQuery(MTablePrivilege.class);
         mSecurityTabPartList = (List<MTablePrivilege>) query.execute();
       }
       LOG.debug("Done executing query for listPrincipalAllTableGrants");
@@ -5065,20 +5206,24 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
   @Override
   public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.debug("Executing listTableGrantsAll");
-      Query query = pm.newQuery(MTablePrivilege.class,
-          "table.tableName == t1 && table.database.name == t2");
+      query =
+          pm.newQuery(MTablePrivilege.class, "table.tableName == t1 && table.database.name == t2");
       query.declareParameters("java.lang.String t1, java.lang.String t2");
-      List<MTablePrivilege> mSecurityTabPartList = (List<MTablePrivilege>)
-          query.executeWithArray(tableName, dbName);
+      List<MTablePrivilege> mSecurityTabPartList =
+          (List<MTablePrivilege>) query.executeWithArray(tableName, dbName);
       LOG.debug("Done executing query for listTableGrantsAll");
       pm.retrieveAll(mSecurityTabPartList);
       List<HiveObjectPrivilege> result = convertTable(mSecurityTabPartList);
@@ -5089,6 +5234,9 @@ public class ObjectStore implements RawStore, Configurable {
       if (!success) {
         rollbackTransaction();
       }
+      if (query != null) {
+        query.closeAll();
+      }
     }
   }
 
@@ -5112,24 +5260,20 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @SuppressWarnings("unchecked")
-  private List<MPartitionPrivilege> listPrincipalAllPartitionGrants(
-      String principalName, PrincipalType principalType) {
+  private List<MPartitionPrivilege> listPrincipalAllPartitionGrants(String principalName,
+      PrincipalType principalType, QueryWrapper queryWrapper) {
     boolean success = false;
     List<MPartitionPrivilege> mSecurityTabPartList = null;
     try {
       openTransaction();
       LOG.debug("Executing listPrincipalAllPartitionGrants");
-      Query query = pm.newQuery(MPartitionPrivilege.class,
-          "principalName == t1 && principalType == t2");
+      Query query = queryWrapper.query = pm.newQuery(MPartitionPrivilege.class, "principalName == t1 && principalType == t2");
       query.declareParameters("java.lang.String t1, java.lang.String t2");
-      mSecurityTabPartList = (List<MPartitionPrivilege>) query.execute(
-          principalName, principalType.toString());
-      LOG
-          .debug("Done executing query for listPrincipalAllPartitionGrants");
+      mSecurityTabPartList =
+          (List<MPartitionPrivilege>) query.execute(principalName, principalType.toString());
       pm.retrieveAll(mSecurityTabPartList);
       success = commitTransaction();
-      LOG
-          .debug("Done retrieving all objects for listPrincipalAllPartitionGrants");
+      LOG.debug("Done retrieving all objects for listPrincipalAllPartitionGrants");
     } finally {
       if (!success) {
         rollbackTransaction();
@@ -5139,21 +5283,22 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
-      String principalName, PrincipalType principalType) {
+  public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(String principalName,
+      PrincipalType principalType) {
     boolean success = false;
+    Query query = null;
     try {
       openTransaction();
       LOG.deb

<TRUNCATED>

[18/50] [abbrv] hive git commit: HIVE-11206 : CBO (Calcite Return Path): Join translation should update all ExprNode recursively (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11206 : CBO (Calcite Return Path): Join translation should update all ExprNode recursively (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e6ea691d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e6ea691d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e6ea691d

Branch: refs/heads/beeline-cli
Commit: e6ea691d34b1ee12c335a2023849c5c445bfb660
Parents: 20f2c29
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Jul 10 04:29:00 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Jul 10 08:46:22 2015 -0700

----------------------------------------------------------------------
 .../calcite/translator/HiveOpConverter.java     | 51 ++++++++++++--------
 1 file changed, 30 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e6ea691d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 84c6cc8..86ac4d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -906,22 +906,11 @@ public class HiveOpConverter {
       // 3. We populate the filters structure
       List<ExprNodeDesc> filtersForInput = new ArrayList<ExprNodeDesc>();
       for (ExprNodeDesc expr : filterExpressions[pos]) {
-        if (expr instanceof ExprNodeGenericFuncDesc) {
-          ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc) expr;
-          List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>();
-          for (ExprNodeDesc functionChild : func.getChildren()) {
-            if (functionChild instanceof ExprNodeColumnDesc) {
-              newChildren.add(colExprMap.get(functionChild.getExprString()));
-            } else {
-              newChildren.add(functionChild);
-            }
-          }
-          func.setChildren(newChildren);
-          filtersForInput.add(expr);
-        }
-        else {
-          filtersForInput.add(expr);
-        }
+        // We need to update the ExprNode, as it currently
+        // refers to columns in the output of the join;
+        // it should refer to the columns output by the RS
+        updateExprNode(expr, colExprMap);
+        filtersForInput.add(expr);
       }
       filters.put(tag, filtersForInput);
     }
@@ -930,11 +919,6 @@ public class HiveOpConverter {
             filters, joinExpressions);
     desc.setReversedExprs(reversedExprs);
 
-    // 4. Create and populate filter map
-    int[][] filterMap = new int[joinExpressions.length][];
-
-    desc.setFilterMap(filterMap);
-
     JoinOperator joinOp = (JoinOperator) OperatorFactory.getAndMakeChild(desc, new RowSchema(
         outputColumns), childOps);
     joinOp.setColumnExprMap(colExprMap);
@@ -947,6 +931,31 @@ public class HiveOpConverter {
     return joinOp;
   }
 
+  /*
+   * This method updates the input expr, changing all the
+   * ExprNodeColumnDesc nodes in it to refer to the columns given by
+   * the colExprMap.
+   *
+   * For instance, "col_0 = 1" would become "VALUE.col_0 = 1";
+   * the execution engine expects filters in the Join operators
+   * to be expressed that way.
+   */
+  private static void updateExprNode(ExprNodeDesc expr, Map<String, ExprNodeDesc> colExprMap) {
+    if (expr instanceof ExprNodeGenericFuncDesc) {
+      ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc) expr;
+      List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>();
+      for (ExprNodeDesc functionChild : func.getChildren()) {
+        if (functionChild instanceof ExprNodeColumnDesc) {
+          newChildren.add(colExprMap.get(functionChild.getExprString()));
+        } else {
+          updateExprNode(functionChild, colExprMap);
+          newChildren.add(functionChild);
+        }
+      }
+      func.setChildren(newChildren);
+    }
+  }
+
   private static JoinType extractJoinType(HiveJoin join) {
     // UNIQUE
     if (join.isDistinct()) {


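The fix is easiest to see on a nested filter. Before this change, only the direct children of the top-level function were rewritten, so in an expression like NOT (col_0 = 1) the column reference sits two levels down and kept pointing at the join output. A self-contained analogue (toy expression classes, not the Hive ExprNodeDesc API) showing why the added recursion matters:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy stand-ins for ExprNodeDesc / ExprNodeColumnDesc / ExprNodeGenericFuncDesc.
abstract class Expr {}

class Col extends Expr {
  final String name;
  Col(String name) { this.name = name; }
  @Override public String toString() { return name; }
}

class Lit extends Expr {
  final String value;
  Lit(String value) { this.value = value; }
  @Override public String toString() { return value; }
}

class Func extends Expr {
  final String op;
  final List<Expr> children;
  Func(String op, Expr... cs) { this.op = op; this.children = new ArrayList<Expr>(Arrays.asList(cs)); }
  @Override public String toString() { return op + children; }
}

public class UpdateExprNodeDemo {
  // Mirrors updateExprNode: column children are swapped via the map,
  // non-column children are recursed into (the part HIVE-11206 adds).
  static void update(Expr expr, Map<String, Expr> colExprMap) {
    if (expr instanceof Func) {
      Func func = (Func) expr;
      for (int i = 0; i < func.children.size(); i++) {
        Expr child = func.children.get(i);
        if (child instanceof Col) {
          func.children.set(i, colExprMap.get(((Col) child).name));
        } else {
          update(child, colExprMap); // without this, nested columns are missed
        }
      }
    }
  }

  public static void main(String[] args) {
    Map<String, Expr> colExprMap = new HashMap<String, Expr>();
    colExprMap.put("col_0", new Col("VALUE.col_0"));
    Expr filter = new Func("not", new Func("=", new Col("col_0"), new Lit("1")));
    update(filter, colExprMap);
    System.out.println(filter); // prints: not[=[VALUE.col_0, 1]]
  }
}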
[24/50] [abbrv] hive git commit: HIVE-11194 - Exchange partition on external tables should fail with error message when target folder already exists (Aihua Xu, reviewed by Chao Sun)

Posted by xu...@apache.org.
HIVE-11194 - Exchange partition on external tables should fail with error message when target folder already exists (Aihua Xu, reviewed by Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8121b9ab
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8121b9ab
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8121b9ab

Branch: refs/heads/beeline-cli
Commit: 8121b9ab644cbe477df477827dd82a9859a7791b
Parents: 65e9fcf
Author: Chao Sun <su...@apache.org>
Authored: Mon Jul 13 09:36:22 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Mon Jul 13 09:36:22 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/common/FileUtils.java    |  9 +++-
 .../hadoop/hive/metastore/HiveMetaStore.java    |  2 +-
 .../queries/clientnegative/exchange_partition.q | 19 +++++++
 .../clientnegative/exchange_partition.q.out     | 54 ++++++++++++++++++++
 4 files changed, 82 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index c2c54bc..7e4f386 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -30,7 +30,6 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DefaultFileAccess;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -635,6 +634,14 @@ public final class FileUtils {
                                Path destPath, boolean inheritPerms,
                                Configuration conf) throws IOException {
     LOG.info("Renaming " + sourcePath + " to " + destPath);
+
+    // If destPath directory exists, rename call will move the sourcePath
+    // into destPath without failing. So check it before renaming.
+    if (fs.exists(destPath)) {
+      throw new IOException("Cannot rename the source path. The destination "
+          + "path already exists.");
+    }
+
     if (!inheritPerms) {
       //just rename the directory
       return fs.rename(sourcePath, destPath);
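
The reason for the new check is a quirk of Hadoop's FileSystem.rename: when the destination is an existing directory, the source is moved inside it rather than the call failing, so an exchange into a pre-existing partition folder would silently nest the data one level deeper. A minimal standalone sketch of the guard (illustrative paths; only the exists/rename calls are taken from the diff):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GuardedRenameDemo {
  // Fail loudly instead of letting rename() move src *into* an existing dst.
  static boolean renameStrict(FileSystem fs, Path src, Path dst) throws IOException {
    if (fs.exists(dst)) {
      throw new IOException("Cannot rename the source path. The destination path already exists.");
    }
    return fs.rename(src, dst);
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Illustrative paths only, mirroring the exchange_partition test below:
    renameStrict(fs, new Path("/tmp/ex_table2/part=part1"),
                     new Path("/tmp/ex_table1/part=part1"));
  }
}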

http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 4c9cd79..920e762 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2581,7 +2581,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         pathCreated = wh.renameDir(sourcePath, destPath);
         success = ms.commitTransaction();
       } finally {
-        if (!success) {
+        if (!success || !pathCreated) {
           ms.rollbackTransaction();
           if (pathCreated) {
             wh.renameDir(destPath, sourcePath);

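The second hunk tightens the cleanup logic: roll back the metastore transaction if either the commit or the directory rename failed, and if the rename succeeded while the transaction did not, rename the directory back. In outline, the commit-with-compensation shape used here (a condensed fragment; 'ms' and 'wh' stand for the metastore and warehouse handles of the surrounding method):

boolean success = false;
boolean pathCreated = false;
try {
  ms.openTransaction();
  // ... update partition metadata ...
  pathCreated = wh.renameDir(sourcePath, destPath);
  success = ms.commitTransaction();
} finally {
  if (!success || !pathCreated) {
    ms.rollbackTransaction();               // undo the metadata change
    if (pathCreated) {
      wh.renameDir(destPath, sourcePath);   // compensate the filesystem move
    }
  }
}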
http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/ql/src/test/queries/clientnegative/exchange_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/exchange_partition.q b/ql/src/test/queries/clientnegative/exchange_partition.q
new file mode 100644
index 0000000..7dc4f57
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/exchange_partition.q
@@ -0,0 +1,19 @@
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ex_table1;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ex_table1/part=part1;
+CREATE EXTERNAL TABLE ex_table1 ( key INT, value STRING)
+    PARTITIONED BY (part STRING)
+    STORED AS textfile
+        LOCATION 'file:${system:test.tmp.dir}/ex_table1';
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ex_table2;
+
+CREATE EXTERNAL TABLE ex_table2 ( key INT, value STRING)
+    PARTITIONED BY (part STRING)
+    STORED AS textfile
+        LOCATION 'file:${system:test.tmp.dir}/ex_table2';
+
+INSERT OVERWRITE TABLE ex_table2 PARTITION (part='part1')
+SELECT key, value FROM src WHERE key < 10;
+SHOW PARTITIONS ex_table2;
+
+ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TABLE ex_table2;

http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/ql/src/test/results/clientnegative/exchange_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/exchange_partition.q.out b/ql/src/test/results/clientnegative/exchange_partition.q.out
new file mode 100644
index 0000000..b81fb99
--- /dev/null
+++ b/ql/src/test/results/clientnegative/exchange_partition.q.out
@@ -0,0 +1,54 @@
+PREHOOK: query: CREATE EXTERNAL TABLE ex_table1 ( key INT, value STRING)
+    PARTITIONED BY (part STRING)
+    STORED AS textfile
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ex_table1
+POSTHOOK: query: CREATE EXTERNAL TABLE ex_table1 ( key INT, value STRING)
+    PARTITIONED BY (part STRING)
+    STORED AS textfile
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ex_table1
+PREHOOK: query: CREATE EXTERNAL TABLE ex_table2 ( key INT, value STRING)
+    PARTITIONED BY (part STRING)
+    STORED AS textfile
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ex_table2
+POSTHOOK: query: CREATE EXTERNAL TABLE ex_table2 ( key INT, value STRING)
+    PARTITIONED BY (part STRING)
+    STORED AS textfile
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ex_table2
+PREHOOK: query: INSERT OVERWRITE TABLE ex_table2 PARTITION (part='part1')
+SELECT key, value FROM src WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ex_table2@part=part1
+POSTHOOK: query: INSERT OVERWRITE TABLE ex_table2 PARTITION (part='part1')
+SELECT key, value FROM src WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ex_table2@part=part1
+POSTHOOK: Lineage: ex_table2 PARTITION(part=part1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ex_table2 PARTITION(part=part1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SHOW PARTITIONS ex_table2
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ex_table2
+POSTHOOK: query: SHOW PARTITIONS ex_table2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@ex_table2
+part=part1
+PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TABLE ex_table2
+PREHOOK: type: null
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.)


[33/50] [abbrv] hive git commit: HIVE-11252 : CBO (Calcite Return Path): DUMMY project in plan (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11252 : CBO (Calcite Return Path): DUMMY project in plan (Jesus Camacho Rodriguez via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/999e0e36
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/999e0e36
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/999e0e36

Branch: refs/heads/beeline-cli
Commit: 999e0e3616525d77cf46c5865f9981b5a6b5609a
Parents: 90a2cf9
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Jul 14 08:22:00 2015 +0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Jul 14 11:18:33 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/CalcitePlanner.java    | 64 +++++++++-----------
 1 file changed, 30 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/999e0e36/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 84bb951..1ea236b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -863,38 +863,20 @@ public class CalcitePlanner extends SemanticAnalyzer {
       calciteOptimizedPlan = hepPlanner.findBestExp();
 
       // 4. Run rule to try to remove projects on top of join operators
-      hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
-      hepPgmBldr.addRuleInstance(HiveJoinCommuteRule.INSTANCE);
-      hepPlanner = new HepPlanner(hepPgmBldr.build());
-      hepPlanner.registerMetadataProviders(list);
-      cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
-      hepPlanner.setRoot(calciteOptimizedPlan);
-      calciteOptimizedPlan = hepPlanner.findBestExp();
+      calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
+              HepMatchOrder.BOTTOM_UP, HiveJoinCommuteRule.INSTANCE);
 
       // 5. Run rule to fix windowing issue when it is done over
       // aggregation columns (HIVE-10627)
-      hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
-      hepPgmBldr.addRuleInstance(HiveWindowingFixRule.INSTANCE);
-      hepPlanner = new HepPlanner(hepPgmBldr.build());
-      hepPlanner.registerMetadataProviders(list);
-      cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
-      hepPlanner.setRoot(calciteOptimizedPlan);
-      calciteOptimizedPlan = hepPlanner.findBestExp();
+      calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
+              HepMatchOrder.BOTTOM_UP, HiveWindowingFixRule.INSTANCE);
 
       // 6. Run rules to aid in translation from Calcite tree to Hive tree
       if (HiveConf.getBoolVar(conf, ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
         // 6.1. Merge join into multijoin operators (if possible)
-        hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
-        hepPgmBldr.addRuleInstance(HiveJoinToMultiJoinRule.INSTANCE);
-        hepPgmBldr = hepPgmBldr.addRuleCollection(ImmutableList.of(
-                HiveJoinProjectTransposeRule.BOTH_PROJECT,
-                HiveJoinToMultiJoinRule.INSTANCE,
-                HiveProjectMergeRule.INSTANCE));
-        hepPlanner = new HepPlanner(hepPgmBldr.build());
-        hepPlanner.registerMetadataProviders(list);
-        cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
-        hepPlanner.setRoot(calciteOptimizedPlan);
-        calciteOptimizedPlan = hepPlanner.findBestExp();
+        calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, true, mdProvider.getMetadataProvider(),
+                HepMatchOrder.BOTTOM_UP, HiveJoinProjectTransposeRule.BOTH_PROJECT,
+                HiveJoinToMultiJoinRule.INSTANCE, HiveProjectMergeRule.INSTANCE);
         // The previous rules can pull up projections through join operators,
         // thus we run the field trimmer again to push them back down
         HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
@@ -902,16 +884,14 @@ public class CalcitePlanner extends SemanticAnalyzer {
             HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSort.HIVE_SORT_REL_FACTORY,
             HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
         calciteOptimizedPlan = fieldTrimmer.trim(calciteOptimizedPlan);
+        calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
+                HepMatchOrder.BOTTOM_UP, ProjectRemoveRule.INSTANCE,
+                new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));
 
         // 6.2.  Introduce exchange operators below join/multijoin operators
-        hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
-        hepPgmBldr.addRuleInstance(HiveInsertExchange4JoinRule.EXCHANGE_BELOW_JOIN);
-        hepPgmBldr.addRuleInstance(HiveInsertExchange4JoinRule.EXCHANGE_BELOW_MULTIJOIN);
-        hepPlanner = new HepPlanner(hepPgmBldr.build());
-        hepPlanner.registerMetadataProviders(list);
-        cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
-        hepPlanner.setRoot(calciteOptimizedPlan);
-        calciteOptimizedPlan = hepPlanner.findBestExp();
+        calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
+                HepMatchOrder.BOTTOM_UP, HiveInsertExchange4JoinRule.EXCHANGE_BELOW_JOIN,
+                HiveInsertExchange4JoinRule.EXCHANGE_BELOW_MULTIJOIN);
       }
 
       if (LOG.isDebugEnabled() && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
@@ -1006,11 +986,27 @@ public class CalcitePlanner extends SemanticAnalyzer {
      */
     private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
         RelMetadataProvider mdProvider, RelOptRule... rules) {
+      return hepPlan(basePlan, followPlanChanges, mdProvider,
+              HepMatchOrder.TOP_DOWN, rules);
+    }
+
+    /**
+     * Run the HEP Planner with the given rule set.
+     *
+     * @param basePlan
+     * @param followPlanChanges
+     * @param mdProvider
+     * @param order
+     * @param rules
+     * @return optimized RelNode
+     */
+    private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider,
+            HepMatchOrder order, RelOptRule... rules) {
 
       RelNode optimizedRelNode = basePlan;
       HepProgramBuilder programBuilder = new HepProgramBuilder();
       if (followPlanChanges) {
-        programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
+        programBuilder.addMatchOrder(order);
         programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules));
       } else {
         // TODO: Should this be also TOP_DOWN?
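
The point of the commit: each of the hand-rolled planner blocks above (build a HepProgramBuilder, register metadata providers, set the root, find the best expression) collapses into a call to a hepPlan overload that now also takes a HepMatchOrder. A self-contained sketch of what such a helper does with Calcite's HEP planner — the followPlanChanges branching and metadata-provider wiring from the diff are omitted, and HepRunner/run are illustrative names:

    import org.apache.calcite.plan.RelOptRule;
    import org.apache.calcite.plan.hep.HepMatchOrder;
    import org.apache.calcite.plan.hep.HepPlanner;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.calcite.rel.RelNode;

    public final class HepRunner {
      // Build a HEP program with the requested match order and rule set,
      // run it over the plan, and return the rewritten plan.
      static RelNode run(RelNode basePlan, HepMatchOrder order, RelOptRule... rules) {
        HepProgramBuilder builder = new HepProgramBuilder().addMatchOrder(order);
        for (RelOptRule rule : rules) {
          builder.addRuleInstance(rule);
        }
        HepPlanner planner = new HepPlanner(builder.build());
        planner.setRoot(basePlan);
        return planner.findBestExp();
      }
    }

Each optimization step then reads as a single call, e.g. run(plan, HepMatchOrder.BOTTOM_UP, HiveJoinCommuteRule.INSTANCE).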


[12/50] [abbrv] hive git commit: HIVE-11198: Fix load data query file format check for partitioned tables (Prasanth Jayachandran reviewed by Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-11198: Fix load data query file format check for partitioned tables (Prasanth Jayachandran reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a2dabcb8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a2dabcb8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a2dabcb8

Branch: refs/heads/beeline-cli
Commit: a2dabcb8c7bc52bd8def1402fa649420ef41e3bd
Parents: be89eac
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Thu Jul 9 11:20:29 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Thu Jul 9 11:20:58 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/parse/LoadSemanticAnalyzer.java     | 12 +++-
 .../clientnegative/load_orc_negative_part.q     | 14 ++++
 .../test/queries/clientpositive/load_orc_part.q | 15 +++++
 .../clientnegative/load_orc_negative_part.q.out | 52 +++++++++++++++
 .../results/clientpositive/load_orc_part.q.out  | 70 ++++++++++++++++++++
 5 files changed, 162 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a2dabcb8/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 187dc20..944cee4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -335,7 +335,17 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private void ensureFileFormatsMatch(TableSpec ts, URI fromURI) throws SemanticException {
-    Class<? extends InputFormat> destInputFormat = ts.tableHandle.getInputFormatClass();
+    final Class<? extends InputFormat> destInputFormat;
+    try {
+      if (ts.getPartSpec() == null || ts.getPartSpec().isEmpty()) {
+        destInputFormat = ts.tableHandle.getInputFormatClass();
+      } else {
+        destInputFormat = ts.partHandle.getInputFormatClass();
+      }
+    } catch (HiveException e) {
+      throw new SemanticException(e);
+    }
+
     // Other file formats should do similar check to make sure file formats match
     // when doing LOAD DATA .. INTO TABLE
     if (OrcInputFormat.class.equals(destInputFormat)) {
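
The gist of the hunk: the check previously always consulted the table-level input format, so loading into a partition whose format had been overridden was judged against the wrong class. A hypothetical helper distilling the new selection (destinationFormat and its parameters are illustrative; in the real code the classes come from ts.tableHandle and ts.partHandle):

    import java.util.Map;

    import org.apache.hadoop.mapred.InputFormat;

    public final class LoadFormatCheck {
      // When a partition spec is present, the partition-level format wins;
      // otherwise fall back to the table-level format.
      static Class<? extends InputFormat> destinationFormat(
          Map<String, String> partSpec,
          Class<? extends InputFormat> tableFormat,
          Class<? extends InputFormat> partFormat) {
        return (partSpec == null || partSpec.isEmpty()) ? tableFormat : partFormat;
      }
    }

The positive test below exercises exactly this: partition ds='11' is switched to textfile, after which loading kv1.txt into it must pass the check even though the table default is ORC.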

http://git-wip-us.apache.org/repos/asf/hive/blob/a2dabcb8/ql/src/test/queries/clientnegative/load_orc_negative_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/load_orc_negative_part.q b/ql/src/test/queries/clientnegative/load_orc_negative_part.q
new file mode 100644
index 0000000..5de4917
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/load_orc_negative_part.q
@@ -0,0 +1,14 @@
+set hive.default.fileformat=ORC;
+create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp);
+create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (ds string);
+
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_staging/;
+
+load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging/orc_split_elim.orc' into table orc_test partition (ds='10');
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=10/;
+
+alter table orc_test add partition(ds='11');
+load data local inpath '../../data/files/kv1.txt' into table orc_test partition(ds='11');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=11/;

http://git-wip-us.apache.org/repos/asf/hive/blob/a2dabcb8/ql/src/test/queries/clientpositive/load_orc_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_orc_part.q b/ql/src/test/queries/clientpositive/load_orc_part.q
new file mode 100644
index 0000000..0927ea4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/load_orc_part.q
@@ -0,0 +1,15 @@
+set hive.default.fileformat=ORC;
+create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp);
+create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (ds string);
+
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_staging/;
+
+load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging/orc_split_elim.orc' into table orc_test partition (ds='10');
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=10/;
+
+alter table orc_test add partition(ds='11');
+alter table orc_test partition(ds='11') set fileformat textfile;
+load data local inpath '../../data/files/kv1.txt' into table orc_test partition(ds='11');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=11/;

http://git-wip-us.apache.org/repos/asf/hive/blob/a2dabcb8/ql/src/test/results/clientnegative/load_orc_negative_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/load_orc_negative_part.q.out b/ql/src/test/results/clientnegative/load_orc_negative_part.q.out
new file mode 100644
index 0000000..32dd627
--- /dev/null
+++ b/ql/src/test/results/clientnegative/load_orc_negative_part.q.out
@@ -0,0 +1,52 @@
+PREHOOK: query: create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_staging
+POSTHOOK: query: create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_staging
+PREHOOK: query: create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_test
+POSTHOOK: query: create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_test
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_staging
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_staging
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_test
+#### A masked pattern was here ####
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_test
+POSTHOOK: Output: default@orc_test@ds=10
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_test@ds=10
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_test@ds=10
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: alter table orc_test add partition(ds='11')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@orc_test
+POSTHOOK: query: alter table orc_test add partition(ds='11')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@orc_test
+POSTHOOK: Output: default@orc_test@ds=11
+FAILED: SemanticException [Error 30019]: The file that you are trying to load does not match the file format of the destination table. Destination table is stored as ORC but the file being loaded is not a valid ORC file.

http://git-wip-us.apache.org/repos/asf/hive/blob/a2dabcb8/ql/src/test/results/clientpositive/load_orc_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_orc_part.q.out b/ql/src/test/results/clientpositive/load_orc_part.q.out
new file mode 100644
index 0000000..34ca493
--- /dev/null
+++ b/ql/src/test/results/clientpositive/load_orc_part.q.out
@@ -0,0 +1,70 @@
+PREHOOK: query: create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_staging
+POSTHOOK: query: create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_staging
+PREHOOK: query: create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_test
+POSTHOOK: query: create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_test
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_staging
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_staging
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_test
+#### A masked pattern was here ####
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_test
+POSTHOOK: Output: default@orc_test@ds=10
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_test@ds=10
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_test@ds=10
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: alter table orc_test add partition(ds='11')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@orc_test
+POSTHOOK: query: alter table orc_test add partition(ds='11')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@orc_test
+POSTHOOK: Output: default@orc_test@ds=11
+PREHOOK: query: alter table orc_test partition(ds='11') set fileformat textfile
+PREHOOK: type: ALTERPARTITION_FILEFORMAT
+PREHOOK: Input: default@orc_test
+PREHOOK: Output: default@orc_test@ds=11
+POSTHOOK: query: alter table orc_test partition(ds='11') set fileformat textfile
+POSTHOOK: type: ALTERPARTITION_FILEFORMAT
+POSTHOOK: Input: default@orc_test
+POSTHOOK: Input: default@orc_test@ds=11
+POSTHOOK: Output: default@orc_test@ds=11
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table orc_test partition(ds='11')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_test@ds=11
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table orc_test partition(ds='11')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_test@ds=11
+Found 1 items
+#### A masked pattern was here ####


[16/50] [abbrv] hive git commit: HIVE-11221: In Tez mode, alter table concatenate orc files can intermittently fail with NPE (Prasanth Jayachandran reviewed by Vikram Dixit)

Posted by xu...@apache.org.
HIVE-11221: In Tez mode, alter table concatenate orc files can intermittently fail with NPE (Prasanth Jayachandran reviewed by Vikram Dixit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b61e6b52
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b61e6b52
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b61e6b52

Branch: refs/heads/beeline-cli
Commit: b61e6b52b54c9f8914aa6e4e042ff2921ce6a947
Parents: d89a7d1
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Jul 10 00:19:37 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Jul 10 00:19:37 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../ql/exec/tez/MergeFileRecordProcessor.java   |  42 +++--
 .../results/clientpositive/tez/orc_merge9.q.out | 186 +++++++++++++++++++
 3 files changed, 216 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b61e6b52/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 8773bd3..97715fc 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -139,6 +139,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   orc_merge6.q,\
   orc_merge7.q,\
   orc_merge8.q,\
+  orc_merge9.q,\
   orc_merge_incompat1.q,\
   orc_merge_incompat2.q,\
   orc_vectorization_ppd.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/b61e6b52/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
index b95ab42..fce1523 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
-import java.io.IOException;
+import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.concurrent.Callable;
 
 import org.apache.commons.logging.Log;
@@ -41,11 +40,14 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.tez.mapreduce.input.MRInputLegacy;
 import org.apache.tez.mapreduce.processor.MRTaskReporter;
+import org.apache.tez.runtime.api.Input;
 import org.apache.tez.runtime.api.LogicalInput;
 import org.apache.tez.runtime.api.LogicalOutput;
 import org.apache.tez.runtime.api.ProcessorContext;
 import org.apache.tez.runtime.library.api.KeyValueReader;
 
+import com.google.common.collect.Lists;
+
 /**
  * Record processor for fast merging of files.
  */
@@ -219,22 +221,36 @@ public class MergeFileRecordProcessor extends RecordProcessor {
   }
 
   private MRInputLegacy getMRInput(Map<String, LogicalInput> inputs) throws Exception {
-    // there should be only one MRInput
-    MRInputLegacy theMRInput = null;
-    LOG.info("VDK: the inputs are: " + inputs);
-    for (Entry<String, LogicalInput> inp : inputs.entrySet()) {
-      if (inp.getValue() instanceof MRInputLegacy) {
-        if (theMRInput != null) {
+    LOG.info("The inputs are: " + inputs);
+
+    // start the mr input and wait for ready event. number of MRInput is expected to be 1
+    List<Input> li = Lists.newArrayList();
+    int numMRInputs = 0;
+    for (LogicalInput inp : inputs.values()) {
+      if (inp instanceof MRInputLegacy) {
+        numMRInputs++;
+        if (numMRInputs > 1) {
           throw new IllegalArgumentException("Only one MRInput is expected");
         }
-        // a better logic would be to find the alias
-        theMRInput = (MRInputLegacy) inp.getValue();
+        inp.start();
+        li.add(inp);
       } else {
-        throw new IOException("Expecting only one input of type MRInputLegacy. Found type: "
-            + inp.getClass().getCanonicalName());
+        throw new IllegalArgumentException("Expecting only one input of type MRInputLegacy." +
+            " Found type: " + inp.getClass().getCanonicalName());
       }
     }
-    theMRInput.init();
+
+    // typically alter table .. concatenate is run on only one partition/one table,
+    // so it doesn't matter if we wait for all inputs or any input to be ready.
+    processorContext.waitForAnyInputReady(li);
+
+    final MRInputLegacy theMRInput;
+    if (li.size() == 1) {
+      theMRInput = (MRInputLegacy) li.get(0);
+      theMRInput.init();
+    } else {
+      throw new IllegalArgumentException("MRInputs count is expected to be 1");
+    }
 
     return theMRInput;
   }
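
A note on the fix: per the commit message the NPE was intermittent, and the rewritten getMRInput() avoids it by starting the single MRInputLegacy and blocking on waitForAnyInputReady() before calling init(). A condensed sketch of that sequence, using the Tez types imported in the diff (MergeInputBootstrap/startAndWait are illustrative names):

    import java.util.List;

    import org.apache.tez.mapreduce.input.MRInputLegacy;
    import org.apache.tez.runtime.api.Input;
    import org.apache.tez.runtime.api.LogicalInput;
    import org.apache.tez.runtime.api.ProcessorContext;

    import com.google.common.collect.Lists;

    public final class MergeInputBootstrap {
      static MRInputLegacy startAndWait(ProcessorContext ctx, LogicalInput input)
          throws Exception {
        if (!(input instanceof MRInputLegacy)) {
          throw new IllegalArgumentException("Expecting MRInputLegacy, found: "
              + input.getClass().getCanonicalName());
        }
        input.start();                          // kick off the input
        List<Input> started = Lists.<Input>newArrayList(input);
        ctx.waitForAnyInputReady(started);      // block until the input is usable
        MRInputLegacy mrInput = (MRInputLegacy) input;
        mrInput.init();                         // safe only after the wait
        return mrInput;
      }
    }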

http://git-wip-us.apache.org/repos/asf/hive/blob/b61e6b52/ql/src/test/results/clientpositive/tez/orc_merge9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge9.q.out b/ql/src/test/results/clientpositive/tez/orc_merge9.q.out
new file mode 100644
index 0000000..bdf0fd3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/orc_merge9.q.out
@@ -0,0 +1,186 @@
+PREHOOK: query: create table ts_merge (
+userid bigint,
+string1 string,
+subtype double,
+decimal1 decimal(38,18),
+ts timestamp
+) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: create table ts_merge (
+userid bigint,
+string1 string,
+subtype double,
+decimal1 decimal(38,18),
+ts timestamp
+) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' overwrite into table ts_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' overwrite into table ts_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table ts_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table ts_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ts_merge
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from ts_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from ts_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+50000
+PREHOOK: query: alter table ts_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@ts_merge
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: alter table ts_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@ts_merge
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: select count(*) from ts_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from ts_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+50000
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- incompatible merge test (stripe statistics missing)
+
+create table a_merge like alltypesorc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: -- incompatible merge test (stripe statistics missing)
+
+create table a_merge like alltypesorc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: insert overwrite table a_merge select * from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: insert overwrite table a_merge select * from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@a_merge
+POSTHOOK: Lineage: a_merge.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: a_merge.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: a_merge.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: a_merge.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table a_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table a_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@a_merge
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+24576
+PREHOOK: query: alter table a_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@a_merge
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: alter table a_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@a_merge
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+24576
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: insert into table a_merge select * from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: insert into table a_merge select * from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@a_merge
+POSTHOOK: Lineage: a_merge.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: a_merge.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: a_merge.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: a_merge.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+36864
+PREHOOK: query: alter table a_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@a_merge
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: alter table a_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@a_merge
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+36864
+Found 2 items
+#### A masked pattern was here ####


[17/50] [abbrv] hive git commit: HIVE-11197 : While extracting join conditions follow Hive rules for type conversion instead of Calcite (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by xu...@apache.org.
HIVE-11197 : While extracting join conditions follow Hive rules for type conversion instead of Calcite (Ashutosh Chauhan via Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/20f2c29f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/20f2c29f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/20f2c29f

Branch: refs/heads/beeline-cli
Commit: 20f2c29f42725c0dd82acc5e3d170d7423003b47
Parents: b61e6b5
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Fri Jul 10 08:40:00 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Jul 10 08:40:00 2015 -0700

----------------------------------------------------------------------
 .../ql/optimizer/calcite/HiveCalciteUtil.java   | 25 ++++----
 .../ql/optimizer/calcite/HiveRelOptUtil.java    | 36 +++++------
 .../calcite/cost/HiveOnTezCostModel.java        | 25 ++++++--
 .../calcite/reloperators/HiveJoin.java          | 11 ++--
 .../calcite/reloperators/HiveMultiJoin.java     | 13 +++-
 .../rules/HiveInsertExchange4JoinRule.java      | 13 +++-
 .../calcite/rules/HiveJoinAddNotNullRule.java   | 16 +++--
 .../calcite/rules/HiveJoinToMultiJoinRule.java  | 64 +++++++-------------
 .../calcite/stats/HiveRelMdSelectivity.java     | 11 ++--
 9 files changed, 120 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 024097e..0200506 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -339,25 +339,25 @@ public class HiveCalciteUtil {
       return this.mapOfProjIndxInJoinSchemaToLeafPInfo;
     }
 
-    public static JoinPredicateInfo constructJoinPredicateInfo(Join j) {
+    public static JoinPredicateInfo constructJoinPredicateInfo(Join j) throws CalciteSemanticException {
       return constructJoinPredicateInfo(j, j.getCondition());
     }
 
-    public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj) {
+    public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj) throws CalciteSemanticException {
       return constructJoinPredicateInfo(mj, mj.getCondition());
     }
 
-    public static JoinPredicateInfo constructJoinPredicateInfo(Join j, RexNode predicate) {
+    public static JoinPredicateInfo constructJoinPredicateInfo(Join j, RexNode predicate) throws CalciteSemanticException {
       return constructJoinPredicateInfo(j.getInputs(), j.getSystemFieldList(), predicate);
     }
 
-    public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj, RexNode predicate) {
+    public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj, RexNode predicate) throws CalciteSemanticException {
       final List<RelDataTypeField> systemFieldList = ImmutableList.of();
       return constructJoinPredicateInfo(mj.getInputs(), systemFieldList, predicate);
     }
 
     public static JoinPredicateInfo constructJoinPredicateInfo(List<RelNode> inputs,
-            List<RelDataTypeField> systemFieldList, RexNode predicate) {
+            List<RelDataTypeField> systemFieldList, RexNode predicate) throws CalciteSemanticException {
       JoinPredicateInfo jpi = null;
       JoinLeafPredicateInfo jlpi = null;
       List<JoinLeafPredicateInfo> equiLPIList = new ArrayList<JoinLeafPredicateInfo>();
@@ -504,7 +504,7 @@ public class HiveCalciteUtil {
     // split accordingly. If the join condition is not part of the equi-join predicate,
     // the returned object will be typed as SQLKind.OTHER.
     private static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(List<RelNode> inputs,
-            List<RelDataTypeField> systemFieldList, RexNode pe) {
+            List<RelDataTypeField> systemFieldList, RexNode pe) throws CalciteSemanticException {
       JoinLeafPredicateInfo jlpi = null;
       List<Integer> filterNulls = new ArrayList<Integer>();
       List<List<RexNode>> joinExprs = new ArrayList<List<RexNode>>();
@@ -513,7 +513,7 @@ public class HiveCalciteUtil {
       }
 
       // 1. Split leaf join predicate to expressions from left, right
-      RexNode otherConditions = HiveRelOptUtil.splitJoinCondition(systemFieldList, inputs, pe,
+      RexNode otherConditions = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, inputs, pe,
           joinExprs, filterNulls, null);
 
       if (otherConditions.isAlwaysTrue()) {
@@ -689,7 +689,7 @@ public class HiveCalciteUtil {
   public static ImmutableList<RexNode> getInputRef(List<Integer> inputRefs, RelNode inputRel) {
     ImmutableList.Builder<RexNode> bldr = ImmutableList.<RexNode> builder();
     for (int i : inputRefs) {
-      bldr.add(new RexInputRef(i, (RelDataType) inputRel.getRowType().getFieldList().get(i).getType()));
+      bldr.add(new RexInputRef(i, inputRel.getRowType().getFieldList().get(i).getType()));
     }
     return bldr.build();
   }
@@ -697,7 +697,7 @@ public class HiveCalciteUtil {
   public static ExprNodeDesc getExprNode(Integer inputRefIndx, RelNode inputRel,
       ExprNodeConverter exprConv) {
     ExprNodeDesc exprNode = null;
-    RexNode rexInputRef = new RexInputRef(inputRefIndx, (RelDataType) inputRel.getRowType()
+    RexNode rexInputRef = new RexInputRef(inputRefIndx, inputRel.getRowType()
         .getFieldList().get(inputRefIndx).getType());
     exprNode = rexInputRef.accept(exprConv);
 
@@ -723,9 +723,9 @@ public class HiveCalciteUtil {
     for (Integer iRef : inputRefs) {
       fieldNames.add(schemaNames.get(iRef));
     }
-    
+
     return fieldNames;
-  }  
+  }
 
   /**
    * Walks over an expression and determines whether it is constant.
@@ -789,12 +789,13 @@ public class HiveCalciteUtil {
 
   private static class InputRefsCollector extends RexVisitorImpl<Void> {
 
-    private Set<Integer> inputRefSet = new HashSet<Integer>();
+    private final Set<Integer> inputRefSet = new HashSet<Integer>();
 
     private InputRefsCollector(boolean deep) {
       super(deep);
     }
 
+    @Override
     public Void visitInputRef(RexInputRef inputRef) {
       inputRefSet.add(inputRef.getIndex());
       return null;
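
A side note on the InputRefsCollector touched above: it is the stock Calcite visitor idiom for harvesting column references from a row expression. A standalone equivalent (InputRefCollector is an illustrative name):

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.calcite.rex.RexInputRef;
    import org.apache.calcite.rex.RexVisitorImpl;

    // Walk a RexNode tree and record the index of every input column it
    // references; deep = true makes the base class recurse into nested calls.
    class InputRefCollector extends RexVisitorImpl<Void> {
      final Set<Integer> refs = new HashSet<>();

      InputRefCollector() {
        super(true);
      }

      @Override
      public Void visitInputRef(RexInputRef inputRef) {
        refs.add(inputRef.getIndex());
        return null;
      }
    }

Usage is expr.accept(collector), after which collector.refs holds the referenced column indices.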

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index 9ebb24f..ab793f1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -7,7 +7,6 @@ import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
@@ -17,11 +16,13 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Util;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
-import com.google.common.collect.ImmutableList;
 
 public class HiveRelOptUtil extends RelOptUtil {
 
@@ -48,14 +49,15 @@ public class HiveRelOptUtil extends RelOptUtil {
    *                      join predicate are at the end of the key lists
    *                      returned
    * @return What's left, never null
+   * @throws CalciteSemanticException
    */
-  public static RexNode splitJoinCondition(
+  public static RexNode splitHiveJoinCondition(
       List<RelDataTypeField> sysFieldList,
       List<RelNode> inputs,
       RexNode condition,
       List<List<RexNode>> joinKeys,
       List<Integer> filterNulls,
-      List<SqlOperator> rangeOp) {
+      List<SqlOperator> rangeOp) throws CalciteSemanticException {
     final List<RexNode> nonEquiList = new ArrayList<>();
 
     splitJoinCondition(
@@ -79,11 +81,10 @@ public class HiveRelOptUtil extends RelOptUtil {
       List<List<RexNode>> joinKeys,
       List<Integer> filterNulls,
       List<SqlOperator> rangeOp,
-      List<RexNode> nonEquiList) {
+      List<RexNode> nonEquiList) throws CalciteSemanticException {
     final int sysFieldCount = sysFieldList.size();
     final RelOptCluster cluster = inputs.get(0).getCluster();
     final RexBuilder rexBuilder = cluster.getRexBuilder();
-    final RelDataTypeFactory typeFactory = cluster.getTypeFactory();
 
     final ImmutableBitSet[] inputsRange = new ImmutableBitSet[inputs.size()];
     int totalFieldCount = 0;
@@ -199,24 +200,25 @@ public class HiveRelOptUtil extends RelOptUtil {
           RelDataType rightKeyType = rightKey.getType();
 
           if (leftKeyType != rightKeyType) {
-            // perform casting
-            RelDataType targetKeyType =
-                typeFactory.leastRestrictive(
-                    ImmutableList.of(leftKeyType, rightKeyType));
+            // perform casting using Hive rules
+            TypeInfo rType = TypeConverter.convert(rightKeyType);
+            TypeInfo lType = TypeConverter.convert(leftKeyType);
+            TypeInfo tgtType = FunctionRegistry.getCommonClassForComparison(lType, rType);
 
-            if (targetKeyType == null) {
-              throw Util.newInternal(
+            if (tgtType == null) {
+              throw new CalciteSemanticException(
                   "Cannot find common type for join keys "
-                  + leftKey + " (type " + leftKeyType + ") and "
-                  + rightKey + " (type " + rightKeyType + ")");
+                      + leftKey + " (type " + leftKeyType + ") and "
+                      + rightKey + " (type " + rightKeyType + ")");
             }
+            RelDataType targetKeyType = TypeConverter.convert(tgtType, rexBuilder.getTypeFactory());
 
-            if (leftKeyType != targetKeyType) {
+            if (leftKeyType != targetKeyType && TypeInfoUtils.isConversionRequiredForComparison(tgtType, lType)) {
               leftKey =
                   rexBuilder.makeCast(targetKeyType, leftKey);
             }
 
-            if (rightKeyType != targetKeyType) {
+            if (rightKeyType != targetKeyType && TypeInfoUtils.isConversionRequiredForComparison(tgtType, rType)) {
               rightKey =
                   rexBuilder.makeCast(targetKeyType, rightKey);
             }

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
index fb67309..e9f1d96 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
@@ -29,7 +29,10 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.calcite.util.Pair;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
@@ -48,6 +51,8 @@ public class HiveOnTezCostModel extends HiveCostModel {
 
   private static HiveAlgorithmsUtil algoUtils;
 
+  private static transient final Log LOG = LogFactory.getLog(HiveOnTezCostModel.class);
+
   synchronized public static HiveOnTezCostModel getCostModel(HiveConf conf) {
     if (INSTANCE == null) {
       INSTANCE = new HiveOnTezCostModel(conf);
@@ -136,7 +141,13 @@ public class HiveOnTezCostModel extends HiveCostModel {
               add(leftRCount).
               add(rightRCount).
               build();
-      final double cpuCost = algoUtils.computeSortMergeCPUCost(cardinalities, join.getSortedInputs());
+      double cpuCost;
+      try {
+        cpuCost = algoUtils.computeSortMergeCPUCost(cardinalities, join.getSortedInputs());
+      } catch (CalciteSemanticException e) {
+        LOG.trace("Failed to compute sort merge cpu cost ", e);
+        return null;
+      }
       // 3. IO cost = cost of writing intermediary results to local FS +
       //              cost of reading from local FS for transferring to join +
       //              cost of transferring map outputs to Join operator
@@ -183,7 +194,7 @@ public class HiveOnTezCostModel extends HiveCostModel {
       if (memoryWithinPhase == null || splitCount == null) {
         return null;
       }
-      
+
       return memoryWithinPhase / splitCount;
     }
 
@@ -289,7 +300,7 @@ public class HiveOnTezCostModel extends HiveCostModel {
       if (join.getStreamingSide() != MapJoinStreamingRelation.LEFT_RELATION
               || join.getStreamingSide() != MapJoinStreamingRelation.RIGHT_RELATION) {
         return null;
-      }      
+      }
       return HiveAlgorithmsUtil.getJoinDistribution(join.getJoinPredicateInfo(),
               join.getStreamingSide());
     }
@@ -521,7 +532,13 @@ public class HiveOnTezCostModel extends HiveCostModel {
       for (int i=0; i<join.getInputs().size(); i++) {
         RelNode input = join.getInputs().get(i);
         // Is smbJoin possible? We need correct order
-        boolean orderFound = join.getSortedInputs().get(i);
+        boolean orderFound;
+        try {
+          orderFound = join.getSortedInputs().get(i);
+        } catch (CalciteSemanticException e) {
+          LOG.trace("Not possible to do SMB Join ",e);
+          return false;
+        }
         if (!orderFound) {
           return false;
         }
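
Since constructJoinPredicateInfo now throws the checked CalciteSemanticException, each caller decides locally what failure means: the cost model logs and returns null (or false), i.e. "this algorithm is not applicable", and the rules further down simply decline to fire. The recurring shape, as a hypothetical wrapper around the real call:

    import org.apache.calcite.rel.core.Join;

    import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
    import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;

    public final class JoinPredicates {
      // Translate the checked exception into "no answer" so cost estimation
      // and rule matching can skip this join instead of aborting planning.
      static JoinPredicateInfo tryConstruct(Join join) {
        try {
          return JoinPredicateInfo.constructJoinPredicateInfo(join);
        } catch (CalciteSemanticException e) {
          return null;  // callers treat null as "not applicable"
        }
      }
    }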

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
index 668960e..6814df6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
@@ -41,6 +41,7 @@ import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.ImmutableIntList;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
@@ -51,7 +52,7 @@ import com.google.common.collect.ImmutableList;
 
 //TODO: Should we convert MultiJoin to be a child of HiveJoin
 public class HiveJoin extends Join implements HiveRelNode {
-  
+
   public static final JoinFactory HIVE_JOIN_FACTORY = new HiveJoinFactoryImpl();
 
   public enum MapJoinStreamingRelation {
@@ -71,14 +72,14 @@ public class HiveJoin extends Join implements HiveRelNode {
       HiveJoin join = new HiveJoin(cluster, null, left, right, condition, joinType, variablesStopped,
               DefaultJoinAlgorithm.INSTANCE, leftSemiJoin);
       return join;
-    } catch (InvalidRelException e) {
+    } catch (InvalidRelException | CalciteSemanticException e) {
       throw new RuntimeException(e);
     }
   }
 
   protected HiveJoin(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
       RexNode condition, JoinRelType joinType, Set<String> variablesStopped,
-      JoinAlgorithm joinAlgo, boolean leftSemiJoin) throws InvalidRelException {
+      JoinAlgorithm joinAlgo, boolean leftSemiJoin) throws InvalidRelException, CalciteSemanticException {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster), left, right, condition, joinType,
         variablesStopped);
     this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
@@ -97,7 +98,7 @@ public class HiveJoin extends Join implements HiveRelNode {
       Set<String> variablesStopped = Collections.emptySet();
       return new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType,
           variablesStopped, joinAlgorithm, leftSemiJoin);
-    } catch (InvalidRelException e) {
+    } catch (InvalidRelException | CalciteSemanticException e) {
       // Semantic error not possible. Must be a bug. Convert to
       // internal error.
       throw new AssertionError(e);
@@ -170,7 +171,7 @@ public class HiveJoin extends Join implements HiveRelNode {
     return smallInput;
   }
 
-  public ImmutableBitSet getSortedInputs() {
+  public ImmutableBitSet getSortedInputs() throws CalciteSemanticException {
     ImmutableBitSet.Builder sortedInputsBuilder = new ImmutableBitSet.Builder();
     JoinPredicateInfo joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
             constructJoinPredicateInfo(this);

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
index 911ceda..7a43f29 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
@@ -31,6 +31,7 @@ import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.util.Pair;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
@@ -60,7 +61,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
    * @param inputs                inputs into this multi-join
    * @param condition            join filter applicable to this join node
    * @param rowType               row type of the join result of this node
-   * @param joinInputs            
+   * @param joinInputs
    * @param joinTypes             the join type corresponding to each input; if
    *                              an input is null-generating in a left or right
    *                              outer join, the entry indicates the type of
@@ -84,7 +85,11 @@ public final class HiveMultiJoin extends AbstractRelNode {
     this.joinTypes = ImmutableList.copyOf(joinTypes);
     this.outerJoin = containsOuter();
 
-    this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
+    try {
+      this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
+    } catch (CalciteSemanticException e) {
+      throw new RuntimeException(e);
+    }
   }
 
 
@@ -105,6 +110,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
         joinTypes);
   }
 
+  @Override
   public RelWriter explainTerms(RelWriter pw) {
     List<String> joinsString = new ArrayList<String>();
     for (int i = 0; i < joinInputs.size(); i++) {
@@ -122,10 +128,12 @@ public final class HiveMultiJoin extends AbstractRelNode {
         .item("joinsDescription", joinsString);
   }
 
+  @Override
   public RelDataType deriveRowType() {
     return rowType;
   }
 
+  @Override
   public List<RelNode> getInputs() {
     return inputs;
   }
@@ -134,6 +142,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
     return ImmutableList.of(condition);
   }
 
+  @Override
   public RelNode accept(RexShuttle shuttle) {
     RexNode joinFilter = shuttle.apply(this.condition);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
index c5ab055..39c69a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
@@ -32,6 +32,7 @@ import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rex.RexNode;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
@@ -75,10 +76,18 @@ public class HiveInsertExchange4JoinRule extends RelOptRule {
     JoinPredicateInfo joinPredInfo;
     if (call.rel(0) instanceof HiveMultiJoin) {
       HiveMultiJoin multiJoin = call.rel(0);
-      joinPredInfo =  HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(multiJoin);
+      try {
+        joinPredInfo =  HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(multiJoin);
+      } catch (CalciteSemanticException e) {
+        throw new RuntimeException(e);
+      }
     } else if (call.rel(0) instanceof Join) {
       Join join = call.rel(0);
-      joinPredInfo =  HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
+      try {
+        joinPredInfo =  HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
+      } catch (CalciteSemanticException e) {
+        throw new RuntimeException(e);
+      }
     } else {
       return;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
index a4484ec..c4a40bf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
@@ -39,6 +39,7 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
@@ -46,12 +47,13 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
+import com.esotericsoftware.minlog.Log;
 import com.google.common.collect.ImmutableList;
 
 public final class HiveJoinAddNotNullRule extends RelOptRule {
 
   private static final String NOT_NULL_FUNC_NAME = "isnotnull";
-  
+
   /** The singleton. */
   public static final HiveJoinAddNotNullRule INSTANCE =
       new HiveJoinAddNotNullRule(HiveFilter.DEFAULT_FILTER_FACTORY);
@@ -72,6 +74,7 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
 
   //~ Methods ----------------------------------------------------------------
 
+  @Override
   public void onMatch(RelOptRuleCall call) {
     final Join join = call.rel(0);
     RelNode leftInput = call.rel(1);
@@ -85,8 +88,13 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
       return;
     }
 
-    JoinPredicateInfo joinPredInfo =
-            HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
+    JoinPredicateInfo joinPredInfo;
+    try {
+      joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
+    } catch (CalciteSemanticException e) {
+      Log.trace("Failed to add is not null filter on join ", e);
+      return;
+    }
 
     Set<Integer> joinLeftKeyPositions = new HashSet<Integer>();
     Set<Integer> joinRightKeyPositions = new HashSet<Integer>();
@@ -133,7 +141,7 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
 
     call.transformTo(newJoin);
   }
-  
+
   private static Map<String,RexNode> getNotNullConditions(RelOptCluster cluster,
           RexBuilder rexBuilder, RelNode input, Set<Integer> inputKeyPositions) {
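
Note the contrast with HiveInsertExchange4JoinRule above: since the not-null
filter is purely an optimization, this rule logs the CalciteSemanticException
at trace level and bails out rather than failing the plan. A hedged sketch of
that best-effort shape (hypothetical helper, not a Hive API):

    import java.util.Optional;

    static Optional<Integer> tryAnalyze(String spec) {
      try {
        return Optional.of(Integer.parseInt(spec));  // the analysis step
      } catch (NumberFormatException e) {
        return Optional.empty();  // log at trace and skip the rewrite
      }
    }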
 

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
index c5e0e11..a0144f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
@@ -22,7 +22,6 @@ import java.util.List;
 
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinRelType;
@@ -35,6 +34,9 @@ import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.Pair;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
@@ -56,6 +58,7 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
 
   private final ProjectFactory projectFactory;
 
+  private static transient final Log LOG = LogFactory.getLog(HiveJoinToMultiJoinRule.class);
 
   //~ Constructors -----------------------------------------------------------
 
@@ -142,8 +145,14 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
         leftJoinTypes = hmj.getJoinTypes();
       }
 
-      boolean combinable = isCombinablePredicate(join, join.getCondition(),
-              leftCondition);
+      boolean combinable;
+      try {
+        combinable = isCombinablePredicate(join, join.getCondition(),
+                leftCondition);
+      } catch (CalciteSemanticException e) {
+        LOG.trace("Failed to merge joins", e);
+        combinable = false;
+      }
       if (combinable) {
         newJoinFilters.add(leftCondition);
         for (int i = 0; i < leftJoinInputs.size(); i++) {
@@ -172,8 +181,14 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
     for (int i=0; i<newInputs.size(); i++) {
       joinKeyExprs.add(new ArrayList<RexNode>());
     }
-    RexNode otherCondition = HiveRelOptUtil.splitJoinCondition(systemFieldList, newInputs, join.getCondition(),
-        joinKeyExprs, filterNulls, null);
+    RexNode otherCondition;
+    try {
+      otherCondition = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, newInputs, join.getCondition(),
+          joinKeyExprs, filterNulls, null);
+    } catch (CalciteSemanticException e) {
+      LOG.trace("Failed to merge joins", e);
+      return null;
+    }
     // If there are remaining parts in the condition, we bail out
     if (!otherCondition.isAlwaysTrue()) {
       return null;
@@ -221,7 +236,7 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
   }
 
   private static boolean isCombinablePredicate(Join join,
-          RexNode condition, RexNode otherCondition) {
+          RexNode condition, RexNode otherCondition) throws CalciteSemanticException {
     final JoinPredicateInfo joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
             constructJoinPredicateInfo(join, condition);
     final JoinPredicateInfo otherJoinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
@@ -236,41 +251,4 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
     }
     return true;
   }
-
-  /**
-   * Shifts a filter originating from the right child of the LogicalJoin to the
-   * right, to reflect the filter now being applied on the resulting
-   * MultiJoin.
-   *
-   * @param joinRel     the original LogicalJoin
-   * @param left        the left child of the LogicalJoin
-   * @param right       the right child of the LogicalJoin
-   * @param rightFilter the filter originating from the right child
-   * @return the adjusted right filter
-   */
-  private static RexNode shiftRightFilter(
-      Join joinRel,
-      RelNode left,
-      RelNode right,
-      RexNode rightFilter) {
-    if (rightFilter == null) {
-      return null;
-    }
-
-    int nFieldsOnLeft = left.getRowType().getFieldList().size();
-    int nFieldsOnRight = right.getRowType().getFieldList().size();
-    int[] adjustments = new int[nFieldsOnRight];
-    for (int i = 0; i < nFieldsOnRight; i++) {
-      adjustments[i] = nFieldsOnLeft;
-    }
-    rightFilter =
-        rightFilter.accept(
-            new RelOptUtil.RexInputConverter(
-                joinRel.getCluster().getRexBuilder(),
-                right.getRowType().getFieldList(),
-                joinRel.getRowType().getFieldList(),
-                adjustments));
-    return rightFilter;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
index 960ec40..715f24f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
@@ -32,6 +32,7 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.BuiltInMethod;
 import org.apache.calcite.util.Pair;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
@@ -57,14 +58,14 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
     return 1.0;
   }
 
-  public Double getSelectivity(HiveJoin j, RexNode predicate) {
+  public Double getSelectivity(HiveJoin j, RexNode predicate) throws CalciteSemanticException {
     if (j.getJoinType().equals(JoinRelType.INNER)) {
       return computeInnerJoinSelectivity(j, predicate);
     }
     return 1.0;
   }
 
-  private Double computeInnerJoinSelectivity(HiveJoin j, RexNode predicate) {
+  private Double computeInnerJoinSelectivity(HiveJoin j, RexNode predicate) throws CalciteSemanticException {
     double ndvCrossProduct = 1;
     Pair<Boolean, RexNode> predInfo =
         getCombinedPredicateForJoin(j, predicate);
@@ -183,7 +184,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
   }
 
   /**
-   * 
+   *
    * @param j
    * @param additionalPredicate
    * @return if predicate is the join condition return (true, joinCond)
@@ -206,7 +207,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
 
   /**
    * Compute Max NDV to determine Join Selectivity.
-   * 
+   *
    * @param jlpi
    * @param colStatMap
    *          Immutable Map of Projection Index (in Join Schema) to Column Stat
@@ -238,5 +239,5 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
 
     return maxNDVSoFar;
   }
-  
+
 }
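
For context on what computeInnerJoinSelectivity() estimates: the usual
NDV-based textbook figure for an inner join is 1 / max(NDV(left key),
NDV(right key)) per join predicate. A hedged sketch of that idea only; the
exact formula Hive uses lives in the method bodies above, not in this helper:

    static double innerJoinSelectivity(long[] leftNdv, long[] rightNdv) {
      double sel = 1.0;
      for (int i = 0; i < leftNdv.length; i++) {
        // guard against zero NDVs from missing column statistics
        sel /= Math.max(Math.max(leftNdv[i], rightNdv[i]), 1L);
      }
      return sel;
    }
    // one join key with NDVs 1000 and 800 -> selectivity 1/1000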


[05/50] [abbrv] hive git commit: HIVE-10927 : Add number of HMS/HS2 connection metrics (Szehon, reviewed by Jimmy Xiang)

Posted by xu...@apache.org.
HIVE-10927 : Add number of HMS/HS2 connection metrics (Szehon, reviewed by Jimmy Xiang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f6ea8cb6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f6ea8cb6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f6ea8cb6

Branch: refs/heads/beeline-cli
Commit: f6ea8cb6fd74c8ccef2b712c0e6da76c30266f53
Parents: 7df153d
Author: Szehon Ho <sz...@cloudera.com>
Authored: Wed Jul 8 11:38:41 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Wed Jul 8 11:38:41 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/common/JvmPauseMonitor.java     |  7 ++-
 .../hive/common/metrics/LegacyMetrics.java      | 30 ++++++++-
 .../hive/common/metrics/common/Metrics.java     | 27 ++++++++
 .../common/metrics/common/MetricsConstant.java  | 35 +++++++++++
 .../common/metrics/common/MetricsVariable.java  | 26 ++++++++
 .../metrics/metrics2/CodahaleMetrics.java       | 58 ++++++++++++++++-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  4 +-
 .../metrics/metrics2/TestCodahaleMetrics.java   | 42 +++++++++++++
 .../hive/metastore/TestMetaStoreMetrics.java    | 66 +++++++++++++++++---
 .../hadoop/hive/metastore/HiveMetaStore.java    | 59 ++++++++++++++---
 .../hadoop/hive/metastore/ObjectStore.java      | 30 ++++++++-
 .../service/cli/thrift/ThriftCLIService.java    | 21 ++++++-
 12 files changed, 378 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
index ec5ac4a..6ffaf94 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
+import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.util.Daemon;
 
@@ -186,14 +187,14 @@ public class JvmPauseMonitor {
           ++numGcWarnThresholdExceeded;
           LOG.warn(formatMessage(
             extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
-          incrementMetricsCounter("jvm.pause.warn-threshold", 1);
+          incrementMetricsCounter(MetricsConstant.JVM_PAUSE_WARN, 1);
         } else if (extraSleepTime > infoThresholdMs) {
           ++numGcInfoThresholdExceeded;
           LOG.info(formatMessage(
             extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
-          incrementMetricsCounter("jvm.pause.info-threshold", 1);
+          incrementMetricsCounter(MetricsConstant.JVM_PAUSE_INFO, 1);
         }
-        incrementMetricsCounter("jvm.pause.extraSleepTime", extraSleepTime);
+        incrementMetricsCounter(MetricsConstant.JVM_EXTRA_SLEEP, extraSleepTime);
         totalGcExtraSleepTime += extraSleepTime;
         gcTimesBeforeSleep = gcTimesAfterSleep;
       }
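
The surrounding loop implements the classic pause-detection trick: sleep for a
fixed interval, and attribute any extra elapsed time beyond the requested
sleep to a JVM pause (usually GC). A minimal self-contained sketch, with
illustrative thresholds:

    public class PauseProbe {
      public static void main(String[] args) throws InterruptedException {
        final long intervalMs = 500;
        final long warnMs = 100;
        while (true) {
          long start = System.nanoTime();
          Thread.sleep(intervalMs);
          // anything beyond the requested sleep is time the JVM was paused
          long extraMs = (System.nanoTime() - start) / 1_000_000 - intervalMs;
          if (extraMs > warnMs) {
            System.out.println("Detected pause of ~" + extraMs + " ms");
          }
        }
      }
    }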

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/common/metrics/LegacyMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/LegacyMetrics.java b/common/src/java/org/apache/hadoop/hive/common/metrics/LegacyMetrics.java
index e811339..52d99e4 100644
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/LegacyMetrics.java
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/LegacyMetrics.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.common.metrics;
 
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
+import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 import java.io.IOException;
@@ -162,11 +163,11 @@ public class LegacyMetrics implements Metrics {
     mbs.registerMBean(metrics, oname);
   }
 
-  public Long incrementCounter(String name) throws IOException{
+  public Long incrementCounter(String name) throws IOException {
     return incrementCounter(name,Long.valueOf(1));
   }
 
-  public Long incrementCounter(String name, long increment) throws IOException{
+  public Long incrementCounter(String name, long increment) throws IOException {
     Long value;
     synchronized(metrics) {
       if (!metrics.hasKey(name)) {
@@ -180,6 +181,29 @@ public class LegacyMetrics implements Metrics {
     return value;
   }
 
+  public Long decrementCounter(String name) throws IOException {
+    return decrementCounter(name, Long.valueOf(1));
+  }
+
+  public Long decrementCounter(String name, long decrement) throws IOException {
+    Long value;
+    synchronized(metrics) {
+      if (!metrics.hasKey(name)) {
+        value = Long.valueOf(-decrement);
+        set(name, value);
+      } else {
+        value = ((Long)get(name)) - decrement;
+        set(name, value);
+      }
+    }
+    return value;
+  }
+
+  @Override
+  public void addGauge(String name, MetricsVariable variable) {
+    //Not implemented.
+  }
+
   public void set(String name, Object value) throws IOException{
     metrics.put(name,value);
   }
@@ -210,6 +234,8 @@ public class LegacyMetrics implements Metrics {
     }
   }
 
+
+
   /**
    * Resets the static context state to initial.
    * Used primarily for testing purposes.

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java b/common/src/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
index 27b69cc..49b2b32 100644
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
@@ -61,4 +61,31 @@ public interface Metrics {
    * @throws IOException
    */
   public Long incrementCounter(String name, long increment) throws IOException;
+
+
+  /**
+   * Decrements a counter of the given name by 1.
+   * @param name
+   * @return
+   * @throws IOException
+   */
+  public Long decrementCounter(String name) throws IOException;
+
+  /**
+   * Decrements a counter of the given name by "decrement"
+   * @param name
+   * @param decrement
+   * @return
+   * @throws IOException
+   */
+  public Long decrementCounter(String name, long decrement) throws IOException;
+
+
+  /**
+   * Adds a metrics gauge to track a variable, for example the number of open database connections.
+   * @param name name of gauge
+   * @param variable variable to track.
+   * @throws IOException
+   */
+  public void addGauge(String name, final MetricsVariable variable);
 }
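
A minimal usage sketch of the extended interface, mirroring how the server
hooks later in this commit guard every metrics call (the method name and
enclosing class are illustrative; the imports come from this same
common.metrics package):

    void onConnect() {
      Metrics metrics = MetricsFactory.getInstance();
      if (metrics != null) {
        try {
          metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
        } catch (Exception e) {
          // metrics failures must never break the connection path
        }
      }
    }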

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java b/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
new file mode 100644
index 0000000..d1ebe12
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.metrics.common;
+
+/**
+ * This class defines some metrics generated by Hive processes.
+ */
+public class MetricsConstant {
+
+  public static final String JVM_PAUSE_INFO = "jvm.pause.info-threshold";
+  public static final String JVM_PAUSE_WARN = "jvm.pause.warn-threshold";
+  public static final String JVM_EXTRA_SLEEP = "jvm.pause.extraSleepTime";
+
+  public static final String OPEN_CONNECTIONS = "open_connections";
+
+  public static final String JDO_ACTIVE_TRANSACTIONS = "active_jdo_transactions";
+  public static final String JDO_ROLLBACK_TRANSACTIONS = "rollbacked_jdo_transactions";
+  public static final String JDO_COMMIT_TRANSACTIONS = "committed_jdo_transactions";
+  public static final String JDO_OPEN_TRANSACTIONS = "opened_jdo_transactions";
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsVariable.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsVariable.java b/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsVariable.java
new file mode 100644
index 0000000..8cf6608
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsVariable.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.metrics.common;
+
+/**
+ * Interface for metrics variables. <p/> For example, the database service could expose the number of
+ * currently active connections.
+ */
+public interface MetricsVariable<T> {
+  public T getValue();
+}
\ No newline at end of file
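
An implementation sketch, assuming the reporter polls getValue() from its own
thread, so the backing value should be thread-safe (class name hypothetical):

    import java.util.concurrent.atomic.AtomicInteger;

    class OpenSessionsVariable implements MetricsVariable<Integer> {
      private final AtomicInteger open = new AtomicInteger();

      void opened() { open.incrementAndGet(); }
      void closed() { open.decrementAndGet(); }

      @Override
      public Integer getValue() {
        return open.get();
      }
    }

Registered via metrics.addGauge("open_sessions", variable), the polled value
then appears under the "gauges" section of the JSON report, as the
TestCodahaleMetrics changes below show.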

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
index ae353d0..7756f43 100644
--- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
+++ b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.common.metrics.metrics2;
 import com.codahale.metrics.ConsoleReporter;
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.ExponentiallyDecayingReservoir;
+import com.codahale.metrics.Gauge;
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricRegistry;
@@ -44,6 +45,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 import java.io.BufferedReader;
@@ -52,12 +54,14 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.lang.management.ManagementFactory;
+import java.net.URI;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
@@ -73,9 +77,11 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
   public final MetricRegistry metricRegistry = new MetricRegistry();
   private final Lock timersLock = new ReentrantLock();
   private final Lock countersLock = new ReentrantLock();
+  private final Lock gaugesLock = new ReentrantLock();
 
   private LoadingCache<String, Timer> timers;
   private LoadingCache<String, Counter> counters;
+  private ConcurrentHashMap<String, Gauge> gauges;
 
   private HiveConf conf;
   private final Set<Closeable> reporters = new HashSet<Closeable>();
@@ -161,6 +167,7 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
         }
       }
     );
+    gauges = new ConcurrentHashMap<String, Gauge>();
 
     //register JVM metrics
     registerAll("gc", new GarbageCollectorMetricSet());
@@ -218,7 +225,7 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
   }
 
   public Long incrementCounter(String name) throws IOException {
-    return incrementCounter(name, 1);
+    return incrementCounter(name, 1L);
   }
 
   public Long incrementCounter(String name, long increment) throws IOException {
@@ -234,6 +241,45 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
     }
   }
 
+  public Long decrementCounter(String name) throws IOException {
+    return decrementCounter(name, 1L);
+  }
+
+  public Long decrementCounter(String name, long decrement) throws IOException {
+    String key = name;
+    try {
+      countersLock.lock();
+      counters.get(key).dec(decrement);
+      return counters.get(key).getCount();
+    } catch(ExecutionException ee) {
+      throw new RuntimeException(ee);
+    } finally {
+      countersLock.unlock();
+    }
+  }
+
+  public void addGauge(String name, final MetricsVariable variable) {
+    Gauge gauge = new Gauge() {
+      @Override
+      public Object getValue() {
+        return variable.getValue();
+      }
+    };
+    try {
+      gaugesLock.lock();
+      gauges.put(name, gauge);
+      // Metrics throws an Exception if we don't do this when the key already exists
+      if (metricRegistry.getGauges().containsKey(name)) {
+        LOGGER.warn("A Gauge with name [" + name + "] already exists. "
+          + " The old gauge will be overwritten, but this is not recommended");
+        metricRegistry.remove(name);
+      }
+      metricRegistry.register(name, gauge);
+    } finally {
+      gaugesLock.unlock();
+    }
+  }
+
   // This method is necessary to synchronize lazy-creation to the timers.
   private Timer getTimer(String name) throws IOException {
     String key = name;
@@ -312,11 +358,19 @@ public class CodahaleMetrics implements org.apache.hadoop.hive.common.metrics.co
           try {
             String json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(metricRegistry);
             Path tmpPath = new Path(pathString + ".tmp");
-            FileSystem fs = FileSystem.get(conf);
+            URI tmpPathURI = tmpPath.toUri();
+            FileSystem fs = null;
+            if (tmpPathURI.getScheme() == null && tmpPathURI.getAuthority() == null) {
+              //default local
+              fs = FileSystem.getLocal(conf);
+            } else {
+              fs = FileSystem.get(tmpPathURI, conf);
+            }
             fs.delete(tmpPath, true);
             bw = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath, true)));
             bw.write(json);
             bw.close();
+            fs.setPermission(tmpPath, FsPermission.createImmutable((short) 0644));
 
             Path path = new Path(pathString);
             fs.rename(tmpPath, path);
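
The block above is the standard write-then-rename pattern: the report is
written to a temp file and renamed over the target so readers never observe a
partially written file. A condensed sketch under the same assumptions (local
filesystem, illustrative paths, the same Hadoop fs imports as the file above):

    static void atomicReport(Configuration conf, String json) throws Exception {
      FileSystem fs = FileSystem.getLocal(conf);
      Path tmp = new Path("/tmp/report.json.tmp");
      fs.delete(tmp, true);
      try (BufferedWriter bw =
          new BufferedWriter(new OutputStreamWriter(fs.create(tmp, true)))) {
        bw.write(json);                 // the complete report goes to the temp file
      }
      fs.setPermission(tmp, FsPermission.createImmutable((short) 0644));
      fs.rename(tmp, new Path("/tmp/report.json"));  // swap in the finished file
    }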

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 6d0cf15..4549105 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1725,8 +1725,8 @@ public class HiveConf extends Configuration {
         "Hive metrics subsystem implementation class."),
     HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "JSON_FILE, JMX",
         "Reporter type for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics, comma separated list of JMX, CONSOLE, JSON_FILE"),
-    HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "file:///tmp/report.json",
-        "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of JSON metrics file.  " +
+    HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json",
+        "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file.  " +
         "This file will get overwritten at every interval."),
     HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5s",
         new TimeValidator(TimeUnit.MILLISECONDS),

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
index 954b388..a3aa549 100644
--- a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
+++ b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
@@ -22,7 +22,9 @@ import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
+import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.junit.After;
@@ -135,4 +137,44 @@ public class TestCodahaleMetrics {
     JsonNode countNode = methodCounterNode.path("count");
     Assert.assertEquals(countNode.asInt(), 5);
   }
+
+  class TestMetricsVariable implements MetricsVariable {
+    private int gaugeVal;
+
+    @Override
+    public Object getValue() {
+      return gaugeVal;
+    }
+    public void setValue(int gaugeVal) {
+      this.gaugeVal = gaugeVal;
+    }
+  };
+
+  @Test
+  public void testGauge() throws Exception {
+    TestMetricsVariable testVar = new TestMetricsVariable();
+    testVar.setValue(20);
+
+    MetricsFactory.getInstance().addGauge("gauge1", testVar);
+    Thread.sleep(2000);
+    byte[] jsonData = Files.readAllBytes(Paths.get(jsonReportFile.getAbsolutePath()));
+    ObjectMapper objectMapper = new ObjectMapper();
+
+    JsonNode rootNode = objectMapper.readTree(jsonData);
+    JsonNode gaugesNode = rootNode.path("gauges");
+    JsonNode methodGaugeNode = gaugesNode.path("gauge1");
+    JsonNode countNode = methodGaugeNode.path("value");
+    Assert.assertEquals(countNode.asInt(), testVar.getValue());
+
+    testVar.setValue(40);
+    Thread.sleep(2000);
+
+    jsonData = Files.readAllBytes(Paths.get(jsonReportFile.getAbsolutePath()));
+
+    rootNode = objectMapper.readTree(jsonData);
+    gaugesNode = rootNode.path("gauges");
+    methodGaugeNode = gaugesNode.path("gauge1");
+    countNode = methodGaugeNode.path("value");
+    Assert.assertEquals(countNode.asInt(), testVar.getValue());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
index 25f34d1..c9da95a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
@@ -24,8 +24,10 @@ import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hive.service.server.HiveServer2;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -37,9 +39,11 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
+import java.util.Map;
 
 /**
  * Tests Hive Metastore Metrics.
+ *
  */
 public class TestMetaStoreMetrics {
 
@@ -49,9 +53,8 @@ public class TestMetaStoreMetrics {
   private static HiveConf hiveConf;
   private static Driver driver;
 
-
-  @Before
-  public void before() throws Exception {
+  @BeforeClass
+  public static void before() throws Exception {
 
     int port = MetaStoreUtils.findFreePort();
 
@@ -86,9 +89,58 @@ public class TestMetaStoreMetrics {
     ObjectMapper objectMapper = new ObjectMapper();
 
     JsonNode rootNode = objectMapper.readTree(jsonData);
-    JsonNode countersNode = rootNode.path("timers");
-    JsonNode methodCounterNode = countersNode.path("api_get_all_databases");
-    JsonNode countNode = methodCounterNode.path("count");
-    Assert.assertTrue(countNode.asInt() > 0);
+    JsonNode timersNode = rootNode.path("timers");
+    JsonNode methodCounterNode = timersNode.path("api_get_all_databases");
+    JsonNode methodCountNode = methodCounterNode.path("count");
+    Assert.assertTrue(methodCountNode.asInt() > 0);
+
+    JsonNode countersNode = rootNode.path("counters");
+    JsonNode committedJdoTxNode = countersNode.path("committed_jdo_transactions");
+    JsonNode committedCountNode = committedJdoTxNode.path("count");
+    Assert.assertTrue(committedCountNode.asInt() > 0);
+  }
+
+
+  @Test
+  public void testConnections() throws Exception {
+    byte[] jsonData = Files.readAllBytes(Paths.get(jsonReportFile.getAbsolutePath()));
+    ObjectMapper objectMapper = new ObjectMapper();
+    JsonNode rootNode = objectMapper.readTree(jsonData);
+    JsonNode countersNode = rootNode.path("counters");
+    JsonNode openCnxNode = countersNode.path("open_connections");
+    JsonNode openCnxCountNode = openCnxNode.path("count");
+    Assert.assertTrue(openCnxCountNode.asInt() == 1);
+
+    //create a second connection
+    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
+    HiveMetaStoreClient msc2 = new HiveMetaStoreClient(hiveConf);
+    Thread.sleep(2000);
+
+    jsonData = Files.readAllBytes(Paths.get(jsonReportFile.getAbsolutePath()));
+    rootNode = objectMapper.readTree(jsonData);
+    countersNode = rootNode.path("counters");
+    openCnxNode = countersNode.path("open_connections");
+    openCnxCountNode = openCnxNode.path("count");
+    Assert.assertTrue(openCnxCountNode.asInt() == 3);
+
+    msc.close();
+    Thread.sleep(2000);
+
+    jsonData = Files.readAllBytes(Paths.get(jsonReportFile.getAbsolutePath()));
+    rootNode = objectMapper.readTree(jsonData);
+    countersNode = rootNode.path("counters");
+    openCnxNode = countersNode.path("open_connections");
+    openCnxCountNode = openCnxNode.path("count");
+    Assert.assertTrue(openCnxCountNode.asInt() == 2);
+
+    msc2.close();
+    Thread.sleep(2000);
+
+    jsonData = Files.readAllBytes(Paths.get(jsonReportFile.getAbsolutePath()));
+    rootNode = objectMapper.readTree(jsonData);
+    countersNode = rootNode.path("counters");
+    openCnxNode = countersNode.path("open_connections");
+    openCnxCountNode = openCnxNode.path("count");
+    Assert.assertTrue(openCnxCountNode.asInt() == 1);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0bcd053..4c9cd79 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.common.cli.CommonCliOptions;
+import org.apache.hadoop.hive.common.metrics.common.Metrics;
+import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -188,8 +190,11 @@ import org.apache.thrift.TException;
 import org.apache.thrift.TProcessor;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.server.ServerContext;
 import org.apache.thrift.server.TServer;
+import org.apache.thrift.server.TServerEventHandler;
 import org.apache.thrift.server.TThreadPoolServer;
 import org.apache.thrift.transport.TFramedTransport;
 import org.apache.thrift.transport.TServerSocket;
@@ -821,14 +826,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           threadLocalMS.remove();
         }
       }
-      if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) {
-        try {
-          MetricsFactory.close();
-        } catch (Exception e) {
-          LOG.error("error in Metrics deinit: " + e.getClass().getName() + " "
-            + e.getMessage(), e);
-        }
-      }
       logInfo("Metastore shutdown complete.");
     }
 
@@ -5878,7 +5875,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
    */
   public static void main(String[] args) throws Throwable {
     HiveConf.setLoadMetastoreConfig(true);
-    HiveConf conf = new HiveConf(HMSHandler.class);
+    final HiveConf conf = new HiveConf(HMSHandler.class);
 
     HiveMetastoreCli cli = new HiveMetastoreCli(conf);
     cli.parse(args);
@@ -5921,6 +5918,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (isCliVerbose) {
             System.err.println(shutdownMsg);
           }
+          if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) {
+            try {
+              MetricsFactory.close();
+            } catch (Exception e) {
+              LOG.error("error in Metrics deinit: " + e.getClass().getName() + " "
+                + e.getMessage(), e);
+            }
+          }
         }
       });
 
@@ -6057,6 +6062,42 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           .maxWorkerThreads(maxWorkerThreads);
 
       TServer tServer = new TThreadPoolServer(args);
+      TServerEventHandler tServerEventHandler = new TServerEventHandler() {
+        @Override
+        public void preServe() {
+        }
+
+        @Override
+        public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) {
+          try {
+            Metrics metrics = MetricsFactory.getInstance();
+            if (metrics != null) {
+              metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
+            }
+          } catch (Exception e) {
+            LOG.warn("Error Reporting Metastore open connection to Metrics system", e);
+          }
+          return null;
+        }
+
+        @Override
+        public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) {
+          try {
+            Metrics metrics = MetricsFactory.getInstance();
+            if (metrics != null) {
+              metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
+            }
+          } catch (Exception e) {
+            LOG.warn("Error Reporting Metastore close connection to Metrics system", e);
+          }
+        }
+
+        @Override
+        public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) {
+        }
+      };
+
+      tServer.setServerEventHandler(tServerEventHandler);
       HMSHandler.LOG.info("Started the new metaserver on port [" + port
           + "]...");
       HMSHandler.LOG.info("Options.minWorkerThreads = "

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 4273c0b..8f52f83 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -62,6 +62,10 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.common.metrics.common.Metrics;
+import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
+import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
+import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -207,7 +211,7 @@ public class ObjectStore implements RawStore, Configurable {
   private MetaStoreDirectSql directSql = null;
   private PartitionExpressionProxy expressionProxy = null;
   private Configuration hiveConf;
-  int openTrasactionCalls = 0;
+  private volatile int openTrasactionCalls = 0;
   private Transaction currentTransaction = null;
   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
 
@@ -257,6 +261,17 @@ public class ObjectStore implements RawStore, Configurable {
 
       initialize(propsFromConf);
 
+      //Add metric for number of active JDO transactions.
+      Metrics metrics = MetricsFactory.getInstance();
+      if (metrics != null) {
+        metrics.addGauge(MetricsConstant.JDO_ACTIVE_TRANSACTIONS, new MetricsVariable() {
+          @Override
+          public Object getValue() {
+            return openTrasactionCalls;
+          }
+        });
+      }
+
       String partitionValidationRegex =
           hiveConf.get(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.name());
       if (partitionValidationRegex != null && partitionValidationRegex.equals("")) {
@@ -430,6 +445,7 @@ public class ObjectStore implements RawStore, Configurable {
 
     boolean result = currentTransaction.isActive();
     debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
+    incrementMetricsCount(MetricsConstant.JDO_OPEN_TRANSACTIONS);
     return result;
   }
 
@@ -468,6 +484,7 @@ public class ObjectStore implements RawStore, Configurable {
       currentTransaction.commit();
     }
 
+    incrementMetricsCount(MetricsConstant.JDO_COMMIT_TRANSACTIONS);
     return true;
   }
 
@@ -505,6 +522,7 @@ public class ObjectStore implements RawStore, Configurable {
       // from reattaching in future transactions
       pm.evictAll();
     }
+    incrementMetricsCount(MetricsConstant.JDO_ROLLBACK_TRANSACTIONS);
   }
 
   @Override
@@ -6807,6 +6825,16 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
+  private void incrementMetricsCount(String name) {
+    try {
+      Metrics metrics = MetricsFactory.getInstance();
+      if (metrics != null) {
+        metrics.incrementCounter(name);
+      }
+    } catch (Exception e) {
+      LOG.warn("Error Reporting JDO operation to Metrics system", e);
+    }
+  }
 
   private void debugLog(String message) {
     if (LOG.isDebugEnabled()) {
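
A concurrency note on the changes above: openTrasactionCalls became volatile
because the JDO_ACTIVE_TRANSACTIONS gauge polls it from the metrics reporter
thread, while ObjectStore instances are effectively single-writer (they are
held in a thread local in HiveMetaStore). A hedged distillation of that shape,
with hypothetical names:

    class TxnDepth {
      // single-writer counter: only the owning thread mutates it, while the
      // reporter thread reads it, so volatile visibility is sufficient here
      private volatile int open = 0;
      void opened() { open++; }   // ++ is not atomic, acceptable for a gauge
      void closed() { open--; }
      int current() { return open; }
    }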

http://git-wip-us.apache.org/repos/asf/hive/blob/f6ea8cb6/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index dfb7faa..67bc778 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -30,6 +30,9 @@ import javax.security.auth.login.LoginException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.metrics.common.Metrics;
+import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
+import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hive.service.AbstractService;
@@ -108,13 +111,29 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
       @Override
       public ServerContext createContext(
           TProtocol input, TProtocol output) {
+        Metrics metrics = MetricsFactory.getInstance();
+        if (metrics != null) {
+          try {
+            metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
+          } catch (Exception e) {
+            LOG.warn("Error Reporting JDO operation to Metrics system", e);
+          }
+        }
         return new ThriftCLIServerContext();
       }
 
       @Override
       public void deleteContext(ServerContext serverContext,
           TProtocol input, TProtocol output) {
-        ThriftCLIServerContext context = (ThriftCLIServerContext)serverContext;
+        Metrics metrics = MetricsFactory.getInstance();
+        if (metrics != null) {
+          try {
+            metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
+          } catch (Exception e) {
+            LOG.warn("Error Reporting JDO operation to Metrics system", e);
+          }
+        }
+        ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
         SessionHandle sessionHandle = context.getSessionHandle();
         if (sessionHandle != null) {
           LOG.info("Session disconnected without closing properly, close it now");


[36/50] [abbrv] hive git commit: HIVE-11174: Hive does not treat floating point signed zeros as equal (-0.0 should equal 0.0 according to IEEE floating point spec) (Sergio Pena, reviewed by Xuefu Zhang)

Posted by xu...@apache.org.
HIVE-11174: Hive does not treat floating point signed zeros as equal (-0.0 should equal 0.0 according to IEEE floating point spec) (Sergio Pena, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/af4aeab9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/af4aeab9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/af4aeab9

Branch: refs/heads/beeline-cli
Commit: af4aeab9c0dffc5f8e42428bf8b835dccc8771ef
Parents: 6e0d480
Author: Sergio Pena <se...@cloudera.com>
Authored: Wed Jul 15 09:47:06 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Wed Jul 15 09:47:06 2015 -0500

----------------------------------------------------------------------
 .../objectinspector/ObjectInspectorUtils.java   | 18 ++++++++++++++--
 .../TestObjectInspectorUtils.java               | 22 ++++++++++++++++++++
 2 files changed, 38 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/af4aeab9/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
index 041d218..6ef9f5d 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
@@ -722,12 +722,26 @@ public final class ObjectInspectorUtils {
       case FLOAT: {
         float v1 = ((FloatObjectInspector) poi1).get(o1);
         float v2 = ((FloatObjectInspector) poi2).get(o2);
-        return Float.compare(v1, v2);
+
+        // The IEEE 754 floating point spec specifies that signed -0.0 and 0.0 should be treated as equal.
+        if (v1 == 0.0f && v2 == 0.0f) {
+          return 0;
+        } else {
+          // Float.compare() treats -0.0 and 0.0 as different
+          return Float.compare(v1, v2);
+        }
       }
       case DOUBLE: {
         double v1 = ((DoubleObjectInspector) poi1).get(o1);
         double v2 = ((DoubleObjectInspector) poi2).get(o2);
-        return Double.compare(v1, v2);
+
+        // The IEEE 754 floating point spec specifies that signed -0.0 and 0.0 should be treated as equal.
+        if (v1 == 0.0d && v2 == 0.0d) {
+          return 0;
+        } else {
+          // Double.compare() treats -0.0 and 0.0 as different
+          return Double.compare(v1, v2);
+        }
       }
       case STRING: {
         if (poi1.preferWritable() || poi2.preferWritable()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/af4aeab9/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorUtils.java b/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorUtils.java
index f3fd6fa..ade0ef7 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorUtils.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorUtils.java
@@ -34,6 +34,28 @@ import org.apache.hadoop.hive.serde2.thrift.test.IntString;
  */
 public class TestObjectInspectorUtils extends TestCase {
 
+  public void testCompareFloatingNumberSignedZero() {
+    PrimitiveObjectInspector doubleOI = PrimitiveObjectInspectorFactory
+        .getPrimitiveJavaObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.DOUBLE);
+
+    Double d1 = Double.valueOf("0.0");
+    Double d2 = Double.valueOf("-0.0");
+    assertEquals(0, ObjectInspectorUtils.compare(d1, doubleOI, d2, doubleOI));
+    assertEquals(0, ObjectInspectorUtils.compare(d2, doubleOI, d1, doubleOI));
+    assertEquals(0, ObjectInspectorUtils.compare(d1, doubleOI, d1, doubleOI));
+    assertEquals(0, ObjectInspectorUtils.compare(d2, doubleOI, d2, doubleOI));
+
+    PrimitiveObjectInspector floatOI = PrimitiveObjectInspectorFactory
+        .getPrimitiveJavaObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.FLOAT);
+
+    Float f1 = Float.valueOf("0.0");
+    Float f2 = Float.valueOf("-0.0");
+    assertEquals(0, ObjectInspectorUtils.compare(f1, floatOI, f2, floatOI));
+    assertEquals(0, ObjectInspectorUtils.compare(f2, floatOI, f1, floatOI));
+    assertEquals(0, ObjectInspectorUtils.compare(f1, floatOI, f1, floatOI));
+    assertEquals(0, ObjectInspectorUtils.compare(f2, floatOI, f2, floatOI));
+  }
+
   public void testObjectInspectorUtils() throws Throwable {
     try {
       ObjectInspector oi1 = ObjectInspectorFactory


[22/50] [abbrv] hive git commit: HIVE-11030 - Enhance storage layer to create one delta file per write (Eugene Koifman, reviewed by Alan Gates)

Posted by xu...@apache.org.
HIVE-11030 - Enhance storage layer to create one delta file per write (Eugene Koifman, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/66feedc5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/66feedc5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/66feedc5

Branch: refs/heads/beeline-cli
Commit: 66feedc5569de959a383e0a58d9e8768bbad0e2c
Parents: 5c94bda
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Mon Jul 13 09:11:28 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Mon Jul 13 09:11:28 2015 -0700

----------------------------------------------------------------------
 .../streaming/AbstractRecordWriter.java         |   4 +-
 .../streaming/mutate/worker/MutatorImpl.java    |   4 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   1 +
 .../hadoop/hive/ql/io/AcidInputFormat.java      |  60 +++++++-
 .../hadoop/hive/ql/io/AcidOutputFormat.java     |  49 +++++-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 152 +++++++++++++++----
 .../hadoop/hive/ql/io/HiveFileFormatUtils.java  |  19 +--
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  20 +--
 .../hadoop/hive/ql/io/orc/OrcNewSplit.java      |  13 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java      |  66 ++++++--
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |  58 +++++++
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  |  16 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |  20 ++-
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |   4 +
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |   3 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   3 +-
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |  27 +++-
 .../hive/ql/txn/compactor/CompactorMR.java      |   4 +-
 .../hive/ql/exec/TestFileSinkOperator.java      |   3 +-
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java |  73 ++++++++-
 .../hive/ql/io/orc/TestInputOutputFormat.java   |  13 +-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  |  57 ++++---
 .../hive/ql/io/orc/TestOrcRecordUpdater.java    |   6 +-
 .../hive/ql/txn/compactor/CompactorTest.java    |  20 ++-
 .../hive/ql/txn/compactor/TestCleaner.java      |   8 +-
 .../hive/ql/txn/compactor/TestCleaner2.java     |  14 ++
 .../hive/ql/txn/compactor/TestInitiator.java    |   4 +
 .../hive/ql/txn/compactor/TestWorker.java       |  49 +++---
 .../hive/ql/txn/compactor/TestWorker2.java      |  16 ++
 29 files changed, 645 insertions(+), 141 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
index ed46bca..c959222 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
@@ -143,7 +143,9 @@ abstract class AbstractRecordWriter implements RecordWriter {
                       .inspector(getSerde().getObjectInspector())
                       .bucket(bucketId)
                       .minimumTransactionId(minTxnId)
-                      .maximumTransactionId(maxTxnID));
+                      .maximumTransactionId(maxTxnID)
+                      .statementId(-1)
+                      .finalDestination(partitionPath));
     } catch (SerDeException e) {
       throw new SerializationError("Failed to get object inspector from Serde "
               + getSerde().getClass().getName(), e);

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
index 0fe41d5..52062f8 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
@@ -78,7 +78,9 @@ public class MutatorImpl implements Mutator {
             .bucket(bucketId)
             .minimumTransactionId(transactionId)
             .maximumTransactionId(transactionId)
-            .recordIdColumn(recordIdColumn));
+            .recordIdColumn(recordIdColumn)
+            .finalDestination(partitionPath)
+            .statementId(-1));
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 934cb42..b74e5fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -988,6 +988,7 @@ public class Driver implements CommandProcessor {
         if (acidSinks != null) {
           for (FileSinkDesc desc : acidSinks) {
             desc.setTransactionId(txnId);
+            desc.setStatementId(txnMgr.getStatementId());
           }
         }
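
The statement id threaded through here feeds the new delta directory naming
described in the AcidInputFormat javadoc below (delta_$tid_$tid_$stid), so
each statement in a multi-statement transaction writes its own delta. A
hypothetical helper showing the idea; the padding widths are assumptions, and
the real logic lives in AcidUtils, not in this sketch:

    static String deltaSubdir(long minTxn, long maxTxn, int stmtId) {
      String base = String.format("delta_%07d_%07d", minTxn, maxTxn);
      // statementId(-1), as used by the streaming writers above, means
      // "no per-statement suffix"
      return stmtId < 0 ? base : base + String.format("_%04d", stmtId);
    }
    // deltaSubdir(17, 17, 0) -> "delta_0000017_0000017_0000"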
 

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
index e1d2395..24506b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
@@ -22,13 +22,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
+import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 
 /**
  * The interface required for input formats that want to support ACID
@@ -62,7 +68,7 @@ import java.io.IOException;
  *   <li>New format -
  *     <pre>
  *        $partition/base_$tid/$bucket
- *                   delta_$tid_$tid/$bucket
+ *                   delta_$tid_$tid_$stid/$bucket
  *     </pre></li>
  * </ul>
  * <p>
@@ -71,6 +77,8 @@ import java.io.IOException;
  * stored sorted by the original transaction id (ascending), bucket (ascending),
  * row id (ascending), and current transaction id (descending). Thus the files
  * can be merged by advancing through the files in parallel.
+ * The stid is the unique id (within the transaction) of the statement that created
+ * this delta file.
  * <p>
  * The base files include all transactions from the beginning of time
  * (transaction id 0) to the transaction in the directory name. Delta
@@ -91,7 +99,7 @@ import java.io.IOException;
  *   For row-at-a-time processing, KEY can conveniently pass RowId into the operator
  *   pipeline.  For vectorized execution the KEY could perhaps represent a range in the batch.
  *   Since {@link org.apache.hadoop.hive.ql.io.orc.OrcInputFormat} is declared to return
- *   {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidRecordReader} is defined
+ *   {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidInputFormat.AcidRecordReader} is defined
  *   to provide access to the RowId.  Other implementations of AcidInputFormat can use either
  *   mechanism.
  * </p>
@@ -101,6 +109,54 @@ import java.io.IOException;
 public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
     extends InputFormat<KEY, VALUE>, InputFormatChecker {
 
+  static final class DeltaMetaData implements Writable {
+    private long minTxnId;
+    private long maxTxnId;
+    private List<Integer> stmtIds;
+    
+    public DeltaMetaData() {
+      this(0,0,null);
+    }
+    DeltaMetaData(long minTxnId, long maxTxnId, List<Integer> stmtIds) {
+      this.minTxnId = minTxnId;
+      this.maxTxnId = maxTxnId;
+      this.stmtIds = stmtIds;
+    }
+    long getMinTxnId() {
+      return minTxnId;
+    }
+    long getMaxTxnId() {
+      return maxTxnId;
+    }
+    List<Integer> getStmtIds() {
+      return stmtIds;
+    }
+    @Override
+    public void write(DataOutput out) throws IOException {
+      out.writeLong(minTxnId);
+      out.writeLong(maxTxnId);
+      out.writeInt(stmtIds == null ? 0 : stmtIds.size());
+      if(stmtIds == null) {
+        return;
+      }
+      for(Integer id : stmtIds) {
+        out.writeInt(id);
+      }
+    }
+    @Override
+    public void readFields(DataInput in) throws IOException {
+      minTxnId = in.readLong();
+      maxTxnId = in.readLong();
+      int numStatements = in.readInt();
+      if(numStatements <= 0) {
+        return;
+      }
+      stmtIds = new ArrayList<>();
+      for(int i = 0; i < numStatements; i++) {
+        stmtIds.add(in.readInt());
+      }
+    }
+  }
   /**
    * Options for controlling the record readers.
    */
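
A hedged sketch of the Writable round trip DeltaMetaData is designed for (assumes
same-package access, since the three-argument constructor is package-private; the
stream plumbing is illustrative only):

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    AcidInputFormat.DeltaMetaData written =
        new AcidInputFormat.DeltaMetaData(60L, 60L, java.util.Arrays.asList(1, 4));
    written.write(new DataOutputStream(bos));

    AcidInputFormat.DeltaMetaData read = new AcidInputFormat.DeltaMetaData();
    read.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    // read now describes txn range [60, 60] with statement ids [1, 4]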

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
index 0d537e1..dd90a95 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
@@ -39,7 +39,7 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
   /**
    * Options to control how the files are written
    */
-  public static class Options {
+  public static class Options implements Cloneable {
     private final Configuration configuration;
     private FileSystem fs;
     private ObjectInspector inspector;
@@ -53,7 +53,9 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
     private PrintStream dummyStream = null;
     private boolean oldStyle = false;
     private int recIdCol = -1;  // Column the record identifier is in, -1 indicates no record id
-
+    //unique within a transaction
+    private int statementId = 0;
+    private Path finalDestination;
     /**
      * Create the options object.
      * @param conf Use the given configuration
@@ -63,6 +65,18 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
     }
 
     /**
+     * shallow clone
+     */
+    @Override
+    public Options clone() {
+      try {
+        return (Options)super.clone();
+      }
+      catch(CloneNotSupportedException ex) {
+        throw new RuntimeException("clone() not properly implemented: " + ex.getMessage(), ex);
+      }
+    }
+    /**
      * Use the given ObjectInspector for each record written.
      * @param inspector the inspector to use.
      * @return this
@@ -185,6 +199,31 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
       return this;
     }
 
+    /**
+     * @since 1.3.0
+     * This can be set to -1 to make the system generate old style (delta_xxxx_yyyy) file names.
+     * This is primarily needed for testing to make sure 1.3 code can still read files created
+     * by older code.  Also used by Compactor.
+     */
+    public Options statementId(int id) {
+      if(id >= AcidUtils.MAX_STATEMENTS_PER_TXN) {
+        throw new RuntimeException("Too many statements for transactionId: " + maximumTransactionId);
+      }
+      if(id < -1) {
+        throw new IllegalArgumentException("Illegal statementId value: " + id);
+      }
+      this.statementId = id;
+      return this;
+    }
+    /**
+     * @param p where the data for this operation will eventually end up;
+     *          basically table or partition directory in FS
+     */
+    public Options finalDestination(Path p) {
+      this.finalDestination = p;
+      return this;
+    }
+    
     public Configuration getConfiguration() {
       return configuration;
     }
@@ -236,6 +275,12 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
     boolean getOldStyle() {
       return oldStyle;
     }
+    public int getStatementId() {
+      return statementId;
+    }
+    public Path getFinalDestination() {
+      return finalDestination;
+    }
   }
 
   /**
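
One intended use of the new clone() support, sketched under the assumption that
options already carries a finalDestination (this mirrors how OrcRecordUpdater probes
files written by earlier statements of the same transaction, further down in this patch):

    // Look up the bucket file an earlier statement of this txn would have written,
    // without disturbing the current Options instance:
    Path earlier = AcidUtils.createFilename(
        options.getFinalDestination(),
        options.clone().statementId(pastStmt));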

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 2214733..c7e0780 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -67,6 +67,15 @@ public class AcidUtils {
   };
   public static final String BUCKET_DIGITS = "%05d";
   public static final String DELTA_DIGITS = "%07d";
+  /**
+   * 10K statements per txn.  Probably overkill, since that many delta files
+   * would not be good for performance anyway.
+   */
+  public static final String STATEMENT_DIGITS = "%04d";
+  /**
+   * This must be in sync with {@link #STATEMENT_DIGITS}
+   */
+  public static final int MAX_STATEMENTS_PER_TXN = 10000;
   public static final Pattern BUCKET_DIGIT_PATTERN = Pattern.compile("[0-9]{5}$");
   public static final Pattern LEGACY_BUCKET_DIGIT_PATTERN = Pattern.compile("^[0-9]{5}");
   public static final PathFilter originalBucketFilter = new PathFilter() {
@@ -79,7 +88,7 @@ public class AcidUtils {
   private AcidUtils() {
     // NOT USED
   }
-  private static final Log LOG = LogFactory.getLog(AcidUtils.class.getName());
+  private static final Log LOG = LogFactory.getLog(AcidUtils.class);
 
   private static final Pattern ORIGINAL_PATTERN =
       Pattern.compile("[0-9]+_[0-9]+");
@@ -104,12 +113,23 @@ public class AcidUtils {
         BUCKET_PREFIX + String.format(BUCKET_DIGITS, bucket));
   }
 
-  private static String deltaSubdir(long min, long max) {
+  /**
+   * This is format of delta dir name prior to Hive 1.3.x
+   */
+  public static String deltaSubdir(long min, long max) {
     return DELTA_PREFIX + String.format(DELTA_DIGITS, min) + "_" +
         String.format(DELTA_DIGITS, max);
   }
 
   /**
+   * Each write statement in a transaction creates its own delta dir.
+   * @since 1.3.x
+   */
+  public static String deltaSubdir(long min, long max, int statementId) {
+    return deltaSubdir(min, max) + "_" + String.format(STATEMENT_DIGITS, statementId);
+  }
+
+  /**
    * Create a filename for a bucket file.
    * @param directory the partition directory
    * @param options the options for writing the bucket
@@ -124,9 +144,15 @@ public class AcidUtils {
     } else if (options.isWritingBase()) {
       subdir = BASE_PREFIX + String.format(DELTA_DIGITS,
           options.getMaximumTransactionId());
+    } else if(options.getStatementId() == -1) {
+      //when minor compaction runs, we collapse per statement delta files inside a single
+      //transaction so we no longer need a statementId in the file name
+      subdir = deltaSubdir(options.getMinimumTransactionId(),
+        options.getMaximumTransactionId());
     } else {
       subdir = deltaSubdir(options.getMinimumTransactionId(),
-          options.getMaximumTransactionId());
+        options.getMaximumTransactionId(),
+        options.getStatementId());
     }
     return createBucketFile(new Path(directory, subdir), options.getBucket());
   }
@@ -214,14 +240,24 @@ public class AcidUtils {
   }
 
   public static class ParsedDelta implements Comparable<ParsedDelta> {
-    final long minTransaction;
-    final long maxTransaction;
-    final FileStatus path;
+    private final long minTransaction;
+    private final long maxTransaction;
+    private final FileStatus path;
+    //-1 is for internal (getAcidState()) purposes and means the delta dir
+    //had no statement ID
+    private final int statementId;
 
+    /**
+     * for pre 1.3.x delta files
+     */
     ParsedDelta(long min, long max, FileStatus path) {
+      this(min, max, path, -1);
+    }
+    ParsedDelta(long min, long max, FileStatus path, int statementId) {
       this.minTransaction = min;
       this.maxTransaction = max;
       this.path = path;
+      this.statementId = statementId;
     }
 
     public long getMinTransaction() {
@@ -236,6 +272,16 @@ public class AcidUtils {
       return path.getPath();
     }
 
+    public int getStatementId() {
+      return statementId == -1 ? 0 : statementId;
+    }
+
+    /**
+     * Compactions (Major/Minor) merge deltas/bases but deletion of old files
+     * happens in a different process; thus it's possible to have bases/deltas with
+     * overlapping txnId boundaries.  The sort order helps figure out the "best" set of files
+     * to use to get data.
+     */
     @Override
     public int compareTo(ParsedDelta parsedDelta) {
       if (minTransaction != parsedDelta.minTransaction) {
@@ -250,7 +296,22 @@ public class AcidUtils {
         } else {
           return -1;
         }
-      } else {
+      }
+      else if(statementId != parsedDelta.statementId) {
+        /**
+         * We want deltas after minor compaction (w/o statementId) to sort
+         * earlier so that getAcidState() considers compacted files (into larger ones) obsolete
+         * Before compaction, include deltas with all statementIds for a given txnId
+         * in a {@link org.apache.hadoop.hive.ql.io.AcidUtils.Directory}
+         */
+        if(statementId < parsedDelta.statementId) {
+          return -1;
+        }
+        else {
+          return 1;
+        }
+      }
+      else {
         return path.compareTo(parsedDelta.path);
       }
     }
@@ -271,46 +332,72 @@ public class AcidUtils {
 
   /**
    * Convert the list of deltas into an equivalent list of begin/end
-   * transaction id pairs.
+   * transaction id pairs.  Assumes {@code deltas} is sorted.
    * @param deltas
    * @return the list of transaction ids to serialize
    */
-  public static List<Long> serializeDeltas(List<ParsedDelta> deltas) {
-    List<Long> result = new ArrayList<Long>(deltas.size() * 2);
-    for(ParsedDelta delta: deltas) {
-      result.add(delta.minTransaction);
-      result.add(delta.maxTransaction);
+  public static List<AcidInputFormat.DeltaMetaData> serializeDeltas(List<ParsedDelta> deltas) {
+    List<AcidInputFormat.DeltaMetaData> result = new ArrayList<>(deltas.size());
+    AcidInputFormat.DeltaMetaData last = null;
+    for(ParsedDelta parsedDelta : deltas) {
+      if(last != null && last.getMinTxnId() == parsedDelta.getMinTransaction() && last.getMaxTxnId() == parsedDelta.getMaxTransaction()) {
+        last.getStmtIds().add(parsedDelta.getStatementId());
+        continue;
+      }
+      last = new AcidInputFormat.DeltaMetaData(parsedDelta.getMinTransaction(), parsedDelta.getMaxTransaction(), new ArrayList<Integer>());
+      result.add(last);
+      if(parsedDelta.statementId >= 0) {
+        last.getStmtIds().add(parsedDelta.getStatementId());
+      }
     }
     return result;
   }
 
   /**
    * Convert the list of begin/end transaction id pairs to a list of delta
-   * directories.
+   * directories.  Note that there may be multiple delta files for the exact same txn range starting
+   * with 1.3.x;
+   * see {@link org.apache.hadoop.hive.ql.io.AcidUtils#deltaSubdir(long, long, int)}
    * @param root the root directory
    * @param deltas list of begin/end transaction id pairs
    * @return the list of delta paths
    */
-  public static Path[] deserializeDeltas(Path root, List<Long> deltas) {
-    int deltaSize = deltas.size() / 2;
-    Path[] result = new Path[deltaSize];
-    for(int i = 0; i < deltaSize; ++i) {
-      result[i] = new Path(root, deltaSubdir(deltas.get(i * 2),
-          deltas.get(i * 2 + 1)));
+  public static Path[] deserializeDeltas(Path root, final List<AcidInputFormat.DeltaMetaData> deltas) throws IOException {
+    List<Path> results = new ArrayList<Path>(deltas.size());
+    for(AcidInputFormat.DeltaMetaData dmd : deltas) {
+      if(dmd.getStmtIds().isEmpty()) {
+        results.add(new Path(root, deltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId())));
+        continue;
+      }
+      for(Integer stmtId : dmd.getStmtIds()) {
+        results.add(new Path(root, deltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId(), stmtId)));
+      }
     }
-    return result;
+    return results.toArray(new Path[results.size()]);
   }
 
-  static ParsedDelta parseDelta(FileStatus path) {
-    String filename = path.getPath().getName();
+  private static ParsedDelta parseDelta(FileStatus path) {
+    ParsedDelta p = parsedDelta(path.getPath());
+    return new ParsedDelta(p.getMinTransaction(),
+      p.getMaxTransaction(), path, p.statementId);
+  }
+  public static ParsedDelta parsedDelta(Path deltaDir) {
+    String filename = deltaDir.getName();
     if (filename.startsWith(DELTA_PREFIX)) {
       String rest = filename.substring(DELTA_PREFIX.length());
       int split = rest.indexOf('_');
+      int split2 = rest.indexOf('_', split + 1);//may be -1 if no statementId
       long min = Long.parseLong(rest.substring(0, split));
-      long max = Long.parseLong(rest.substring(split + 1));
-      return new ParsedDelta(min, max, path);
+      long max = split2 == -1 ?
+        Long.parseLong(rest.substring(split + 1)) :
+        Long.parseLong(rest.substring(split + 1, split2));
+      if(split2 == -1) {
+        return new ParsedDelta(min, max, null);
+      }
+      int statementId = Integer.parseInt(rest.substring(split2 + 1));
+      return new ParsedDelta(min, max, null, statementId);
     }
-    throw new IllegalArgumentException(path + " does not start with " +
+    throw new IllegalArgumentException(deltaDir + " does not start with " +
                                        DELTA_PREFIX);
   }
 
@@ -407,15 +494,24 @@ public class AcidUtils {
 
     Collections.sort(working);
     long current = bestBaseTxn;
+    int lastStmtId = -1;
     for(ParsedDelta next: working) {
       if (next.maxTransaction > current) {
         // are any of the new transactions ones that we care about?
         if (txnList.isTxnRangeValid(current+1, next.maxTransaction) !=
-            ValidTxnList.RangeResponse.NONE) {
+          ValidTxnList.RangeResponse.NONE) {
           deltas.add(next);
           current = next.maxTransaction;
+          lastStmtId = next.statementId;
         }
-      } else {
+      }
+      else if(next.maxTransaction == current && lastStmtId >= 0) {
+        //make sure to get all deltas within a single transaction;  multi-statement txn
+        //generate multiple delta files with the same txnId range
+        //of course, if maxTransaction has already been minor compacted, all per statement deltas are obsolete
+        deltas.add(next);
+      }
+      else {
         obsolete.add(next.path);
       }
     }
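
To make the new naming concrete, the expected results given DELTA_DIGITS = "%07d" and
STATEMENT_DIGITS = "%04d" (consistent with the TestAcidUtils changes below):

    AcidUtils.deltaSubdir(100, 200);    // "delta_0000100_0000200"       (pre-1.3.x / compacted)
    AcidUtils.deltaSubdir(100, 200, 7); // "delta_0000100_0000200_0007"  (per-statement)

    AcidUtils.ParsedDelta pd =
        AcidUtils.parsedDelta(new Path("/tbl/delta_0000100_0000200_0007"));
    // pd.getMinTransaction() == 100, pd.getMaxTransaction() == 200,
    // pd.getStatementId() == 7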

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
index 7ad5aa0..50ba740 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
@@ -297,31 +297,32 @@ public final class HiveFileFormatUtils {
     // TODO not 100% sure about this.  This call doesn't set the compression type in the conf
     // file the way getHiveRecordWriter does, as ORC appears to read the value for itself.  Not
     // sure if this is correct or not.
-    return getRecordUpdater(jc, acidOutputFormat, conf.getCompressed(), conf.getTransactionId(),
-        bucket, inspector, tableInfo.getProperties(), outPath, reporter, rowIdColNum);
+    return getRecordUpdater(jc, acidOutputFormat,
+        bucket, inspector, tableInfo.getProperties(), outPath, reporter, rowIdColNum, conf);
   }
 
 
   private static RecordUpdater getRecordUpdater(JobConf jc,
                                                 AcidOutputFormat<?, ?> acidOutputFormat,
-                                                boolean isCompressed,
-                                                long txnId,
                                                 int bucket,
                                                 ObjectInspector inspector,
                                                 Properties tableProp,
                                                 Path outPath,
                                                 Reporter reporter,
-                                                int rowIdColNum) throws IOException {
+                                                int rowIdColNum,
+                                                FileSinkDesc conf) throws IOException {
     return acidOutputFormat.getRecordUpdater(outPath, new AcidOutputFormat.Options(jc)
-        .isCompressed(isCompressed)
+        .isCompressed(conf.getCompressed())
         .tableProperties(tableProp)
         .reporter(reporter)
         .writingBase(false)
-        .minimumTransactionId(txnId)
-        .maximumTransactionId(txnId)
+        .minimumTransactionId(conf.getTransactionId())
+        .maximumTransactionId(conf.getTransactionId())
         .bucket(bucket)
         .inspector(inspector)
-        .recordIdColumn(rowIdColNum));
+        .recordIdColumn(rowIdColNum)
+        .statementId(conf.getStatementId())
+        .finalDestination(conf.getDestPath()));
   }
 
   public static PartitionDesc getPartitionDescFromPathRecursively(

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 8864013..3a9e64e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -439,13 +439,13 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     private final FileStatus file;
     private final FileInfo fileInfo;
     private final boolean isOriginal;
-    private final List<Long> deltas;
+    private final List<DeltaMetaData> deltas;
     private final boolean hasBase;
 
     SplitInfo(Context context, FileSystem fs,
         FileStatus file, FileInfo fileInfo,
         boolean isOriginal,
-        List<Long> deltas,
+        List<DeltaMetaData> deltas,
         boolean hasBase, Path dir, boolean[] covered) throws IOException {
       super(dir, context.numBuckets, deltas, covered);
       this.context = context;
@@ -467,12 +467,12 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     FileSystem fs;
     List<FileStatus> files;
     boolean isOriginal;
-    List<Long> deltas;
+    List<DeltaMetaData> deltas;
     Path dir;
     boolean[] covered;
 
     public ETLSplitStrategy(Context context, FileSystem fs, Path dir, List<FileStatus> children,
-        boolean isOriginal, List<Long> deltas, boolean[] covered) {
+        boolean isOriginal, List<DeltaMetaData> deltas, boolean[] covered) {
       this.context = context;
       this.dir = dir;
       this.fs = fs;
@@ -543,14 +543,14 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
   static final class BISplitStrategy extends ACIDSplitStrategy {
     List<FileStatus> fileStatuses;
     boolean isOriginal;
-    List<Long> deltas;
+    List<DeltaMetaData> deltas;
     FileSystem fs;
     Context context;
     Path dir;
 
     public BISplitStrategy(Context context, FileSystem fs,
         Path dir, List<FileStatus> fileStatuses, boolean isOriginal,
-        List<Long> deltas, boolean[] covered) {
+        List<DeltaMetaData> deltas, boolean[] covered) {
       super(dir, context.numBuckets, deltas, covered);
       this.context = context;
       this.fileStatuses = fileStatuses;
@@ -587,11 +587,11 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
    */
   static class ACIDSplitStrategy implements SplitStrategy<OrcSplit> {
     Path dir;
-    List<Long> deltas;
+    List<DeltaMetaData> deltas;
     boolean[] covered;
     int numBuckets;
 
-    public ACIDSplitStrategy(Path dir, int numBuckets, List<Long> deltas, boolean[] covered) {
+    public ACIDSplitStrategy(Path dir, int numBuckets, List<DeltaMetaData> deltas, boolean[] covered) {
       this.dir = dir;
       this.numBuckets = numBuckets;
       this.deltas = deltas;
@@ -640,7 +640,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
       final SplitStrategy splitStrategy;
       AcidUtils.Directory dirInfo = AcidUtils.getAcidState(dir,
           context.conf, context.transactionList);
-      List<Long> deltas = AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
+      List<DeltaMetaData> deltas = AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
       Path base = dirInfo.getBaseDirectory();
       List<FileStatus> original = dirInfo.getOriginalFiles();
       boolean[] covered = new boolean[context.numBuckets];
@@ -718,7 +718,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     private Metadata metadata;
     private List<OrcProto.Type> types;
     private final boolean isOriginal;
-    private final List<Long> deltas;
+    private final List<DeltaMetaData> deltas;
     private final boolean hasBase;
     private OrcFile.WriterVersion writerVersion;
     private long projColsUncompressedSize;
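
A sketch of what the split-side change means in practice: serializeDeltas() now collapses
consecutive per-statement deltas with the same txn range into a single DeltaMetaData
entry, and deserializeDeltas() expands it back (dirInfo and root are hypothetical
stand-ins):

    // Given current directories delta_0000060_0000060_0001 and delta_0000060_0000060_0004:
    List<AcidInputFormat.DeltaMetaData> meta =
        AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
    // -> one entry: minTxnId=60, maxTxnId=60, stmtIds=[1, 4]

    Path[] dirs = AcidUtils.deserializeDeltas(root, meta);
    // -> { root/delta_0000060_0000060_0001, root/delta_0000060_0000060_0004 }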

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
index da23544..b58c880 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
@@ -24,6 +24,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -37,7 +38,7 @@ public class OrcNewSplit extends FileSplit {
   private boolean hasFooter;
   private boolean isOriginal;
   private boolean hasBase;
-  private final List<Long> deltas = new ArrayList<Long>();
+  private final List<AcidInputFormat.DeltaMetaData> deltas = new ArrayList<>();
   private OrcFile.WriterVersion writerVersion;
 
   protected OrcNewSplit(){
@@ -67,8 +68,8 @@ public class OrcNewSplit extends FileSplit {
         (hasFooter ? OrcSplit.FOOTER_FLAG : 0);
     out.writeByte(flags);
     out.writeInt(deltas.size());
-    for(Long delta: deltas) {
-      out.writeLong(delta);
+    for(AcidInputFormat.DeltaMetaData delta: deltas) {
+      delta.write(out);
     }
     if (hasFooter) {
       // serialize FileMetaInfo fields
@@ -101,7 +102,9 @@ public class OrcNewSplit extends FileSplit {
     deltas.clear();
     int numDeltas = in.readInt();
     for(int i=0; i < numDeltas; i++) {
-      deltas.add(in.readLong());
+      AcidInputFormat.DeltaMetaData dmd = new AcidInputFormat.DeltaMetaData();
+      dmd.readFields(in);
+      deltas.add(dmd);
     }
     if (hasFooter) {
       // deserialize FileMetaInfo fields
@@ -137,7 +140,7 @@ public class OrcNewSplit extends FileSplit {
     return hasBase;
   }
 
-  public List<Long> getDeltas() {
+  public List<AcidInputFormat.DeltaMetaData> getDeltas() {
     return deltas;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
index 728118a..2f11611 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
@@ -72,41 +72,55 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
   /**
    * A RecordIdentifier extended with the current transaction id. This is the
    * key of our merge sort with the originalTransaction, bucket, and rowId
-   * ascending and the currentTransaction descending. This means that if the
+   * ascending and the currentTransaction, statementId descending. This means that if the
    * reader is collapsing events to just the last update, just the first
    * instance of each record is required.
    */
   final static class ReaderKey extends RecordIdentifier{
     private long currentTransactionId;
+    private int statementId;//sort on this descending, like currentTransactionId
 
     public ReaderKey() {
-      this(-1, -1, -1, -1);
+      this(-1, -1, -1, -1, 0);
     }
 
     public ReaderKey(long originalTransaction, int bucket, long rowId,
                      long currentTransactionId) {
+      this(originalTransaction, bucket, rowId, currentTransactionId, 0);
+    }
+    /**
+     * @param statementId - set this to 0 if N/A
+     */
+    public ReaderKey(long originalTransaction, int bucket, long rowId,
+                     long currentTransactionId, int statementId) {
       super(originalTransaction, bucket, rowId);
       this.currentTransactionId = currentTransactionId;
+      this.statementId = statementId;
     }
 
     @Override
     public void set(RecordIdentifier other) {
       super.set(other);
       currentTransactionId = ((ReaderKey) other).currentTransactionId;
+      statementId = ((ReaderKey) other).statementId;
     }
 
     public void setValues(long originalTransactionId,
                           int bucket,
                           long rowId,
-                          long currentTransactionId) {
+                          long currentTransactionId,
+                          int statementId) {
       setValues(originalTransactionId, bucket, rowId);
       this.currentTransactionId = currentTransactionId;
+      this.statementId = statementId;
     }
 
     @Override
     public boolean equals(Object other) {
       return super.equals(other) &&
-          currentTransactionId == ((ReaderKey) other).currentTransactionId;
+          currentTransactionId == ((ReaderKey) other).currentTransactionId &&
+          //consistent with compareTo()
+          statementId == ((ReaderKey) other).statementId;
     }
 
     @Override
@@ -118,6 +132,9 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
           if (currentTransactionId != oth.currentTransactionId) {
             return currentTransactionId < oth.currentTransactionId ? +1 : -1;
           }
+          if(statementId != oth.statementId) {
+            return statementId < oth.statementId ? +1 : -1;
+          }
         } else {
           return -1;
         }
@@ -125,6 +142,13 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
       return sup;
     }
 
+    /**
+     * True if the same transaction modified this row more than once
+     */
+    private boolean isSameRow(ReaderKey other) {
+      return compareRow(other) == 0 && currentTransactionId == other.currentTransactionId;
+    }
+    
     public long getCurrentTransactionId() {
       return currentTransactionId;
     }
@@ -142,7 +166,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
     public String toString() {
       return "{originalTxn: " + getTransactionId() + ", bucket: " +
           getBucketId() + ", row: " + getRowId() + ", currentTxn: " +
-          currentTransactionId + "}";
+          currentTransactionId + ", statementId: "+ statementId + "}";
     }
   }
 
@@ -159,6 +183,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
     final ReaderKey key;
     final RecordIdentifier maxKey;
     final int bucket;
+    private final int statementId;
 
     /**
      * Create a reader that reads from the first key larger than minKey to any
@@ -170,17 +195,19 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
      * @param maxKey only return keys less than or equal to maxKey if it is
      *               non-null
      * @param options options to provide to read the rows.
+     * @param statementId id of SQL statement within a transaction
      * @throws IOException
      */
     ReaderPair(ReaderKey key, Reader reader, int bucket,
                RecordIdentifier minKey, RecordIdentifier maxKey,
-               ReaderImpl.Options options) throws IOException {
+               ReaderImpl.Options options, int statementId) throws IOException {
       this.reader = reader;
       this.key = key;
       this.maxKey = maxKey;
       this.bucket = bucket;
       // TODO use stripe statistics to jump over stripes
       recordReader = reader.rowsOptions(options);
+      this.statementId = statementId;
       // advance the reader until we reach the minimum key
       do {
         next(nextRecord);
@@ -195,7 +222,8 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
         key.setValues(OrcRecordUpdater.getOriginalTransaction(nextRecord),
             OrcRecordUpdater.getBucket(nextRecord),
             OrcRecordUpdater.getRowId(nextRecord),
-            OrcRecordUpdater.getCurrentTransaction(nextRecord));
+            OrcRecordUpdater.getCurrentTransaction(nextRecord),
+            statementId);
 
         // if this record is larger than maxKey, we need to stop
         if (maxKey != null && key.compareRow(maxKey) > 0) {
@@ -223,7 +251,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
     OriginalReaderPair(ReaderKey key, Reader reader, int bucket,
                        RecordIdentifier minKey, RecordIdentifier maxKey,
                        Reader.Options options) throws IOException {
-      super(key, reader, bucket, minKey, maxKey, options);
+      super(key, reader, bucket, minKey, maxKey, options, 0);
     }
 
     @Override
@@ -263,7 +291,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
           nextRecord.setFieldValue(OrcRecordUpdater.ROW,
               recordReader.next(OrcRecordUpdater.getRow(next)));
         }
-        key.setValues(0L, bucket, nextRowId, 0L);
+        key.setValues(0L, bucket, nextRowId, 0L, 0);
         if (maxKey != null && key.compareRow(maxKey) > 0) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("key " + key + " > maxkey " + maxKey);
@@ -415,7 +443,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
     this.offset = options.getOffset();
     this.length = options.getLength();
     this.validTxnList = validTxnList;
-    // modify the optins to reflect the event instead of the base row
+    // modify the options to reflect the event instead of the base row
     Reader.Options eventOptions = createEventOptions(options);
     if (reader == null) {
       baseReader = null;
@@ -438,7 +466,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
                                       options);
       } else {
         pair = new ReaderPair(key, reader, bucket, minKey, maxKey,
-                              eventOptions);
+                              eventOptions, 0);
       }
 
       // if there is at least one record, put it in the map
@@ -458,13 +486,14 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
       for(Path delta: deltaDirectory) {
         ReaderKey key = new ReaderKey();
         Path deltaFile = AcidUtils.createBucketFile(delta, bucket);
+        AcidUtils.ParsedDelta deltaDir = AcidUtils.parsedDelta(delta);
         FileSystem fs = deltaFile.getFileSystem(conf);
         long length = getLastFlushLength(fs, deltaFile);
         if (length != -1 && fs.exists(deltaFile)) {
           Reader deltaReader = OrcFile.createReader(deltaFile,
               OrcFile.readerOptions(conf).maxLength(length));
           ReaderPair deltaPair = new ReaderPair(key, deltaReader, bucket, minKey,
-            maxKey, eventOptions);
+            maxKey, eventOptions, deltaDir.getStatementId());
           if (deltaPair.nextRecord != null) {
             readers.put(key, deltaPair);
           }
@@ -580,9 +609,18 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
         continue;
       }
 
+      /*for multi-statement txns, you may have multiple events for the same
+      * row in the same (current) transaction.  We want to collapse these to just the last one
+      * regardless of whether we are minor compacting.  Consider INSERT/UPDATE/UPDATE of the
+      * same row in the same txn.  There is no benefit in passing along anything except the last
+      * event.  If we did want to pass it along, we'd have to include statementId in the row
+      * returned so that compaction could write it out, or make minor compaction understand
+      * how to write out delta files in delta_xxx_yyy_stid format.  There doesn't seem to be any
+      * value in this.*/
+      boolean isSameRow = prevKey.isSameRow((ReaderKey)recordIdentifier);
       // if we are collapsing, figure out if this is a new row
-      if (collapse) {
-        keysSame = prevKey.compareRow(recordIdentifier) == 0;
+      if (collapse || isSameRow) {
+        keysSame = (collapse && prevKey.compareRow(recordIdentifier) == 0) || (isSameRow);
         if (!keysSame) {
           prevKey.set(recordIdentifier);
         }
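
A small same-package illustration of the extended sort key (the constructor used here is
the new five-argument one added above):

    ReaderKey a = new ReaderKey(1L, 0, 5L, 100L, 2); // row 5, txn 100, statement 2
    ReaderKey b = new ReaderKey(1L, 0, 5L, 100L, 1); // same row, same txn, statement 1
    // a.compareTo(b) < 0: statementId sorts descending (like currentTransactionId),
    // so the collapsing reader sees the row's last event first and can drop the rest.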

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index b576496..e4651b8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -89,6 +89,7 @@ public class OrcRecordUpdater implements RecordUpdater {
   private final IntWritable bucket = new IntWritable();
   private final LongWritable rowId = new LongWritable();
   private long insertedRows = 0;
+  private long rowIdOffset = 0;
   // This records how many rows have been inserted or deleted.  It is separate from insertedRows
   // because that is monotonically increasing to give new unique row ids.
   private long rowCountDelta = 0;
@@ -263,6 +264,41 @@ public class OrcRecordUpdater implements RecordUpdater {
     item.setFieldValue(ROW_ID, rowId);
   }
 
+  /**
+   * To handle multiple INSERT... statements in a single transaction, we want to make sure
+   * to generate a unique {@code rowId} for all inserted rows of the transaction.
+   * @return the largest rowId created by previous statements (may be 0)
+   * @throws IOException
+   */
+  private long findRowIdOffsetForInsert() throws IOException {
+    /*
+     * 1. need to know which bucket we are writing to
+     * 2. need to know which delta dir it's in
+     * Then,
+     * 1. find the same bucket file in the previous delta dir for this txn
+     * 2. read the footer and get AcidStats, which has the insert count
+     * 2.1 if AcidStats.inserts > 0, done
+     *     else go to the previous delta file
+     * For example, consider the insert/update/insert case...*/
+    if(options.getStatementId() <= 0) {
+      return 0;//there is only 1 statement in this transaction (so far)
+    }
+    for(int pastStmt = options.getStatementId() - 1; pastStmt >= 0; pastStmt--) {
+      Path matchingBucket = AcidUtils.createFilename(options.getFinalDestination(), options.clone().statementId(pastStmt));
+      if(!fs.exists(matchingBucket)) {
+        continue;
+      }
+      Reader reader = OrcFile.createReader(matchingBucket, OrcFile.readerOptions(options.getConfiguration()));
+      //no close() on Reader?!
+      AcidStats acidStats = parseAcidStats(reader);
+      if(acidStats.inserts > 0) {
+        return acidStats.inserts;
+      }
+    }
+    //if we got here, we looked at all delta files in this txn prior to the current
+    //statement and didn't find any inserts...
+    return 0;
+  }
   // Find the record identifier column (if there) and return a possibly new ObjectInspector that
   // will strain out the record id for the underlying writer.
   private ObjectInspector findRecId(ObjectInspector inspector, int rowIdColNum) {
@@ -304,6 +340,9 @@ public class OrcRecordUpdater implements RecordUpdater {
           recIdInspector.getStructFieldData(rowIdValue, originalTxnField));
       rowId = rowIdInspector.get(recIdInspector.getStructFieldData(rowIdValue, rowIdField));
     }
+    else if(operation == INSERT_OPERATION) {
+      rowId += rowIdOffset;
+    }
     this.rowId.set(rowId);
     this.originalTransaction.set(originalTransaction);
     item.setFieldValue(OrcRecordUpdater.ROW, (operation == DELETE_OPERATION ? null : row));
@@ -315,6 +354,9 @@ public class OrcRecordUpdater implements RecordUpdater {
   public void insert(long currentTransaction, Object row) throws IOException {
     if (this.currentTransaction.get() != currentTransaction) {
       insertedRows = 0;
+      //this call is almost a no-op in the hcatalog.streaming case, since statementId == 0
+      //is always true there
+      rowIdOffset = findRowIdOffsetForInsert();
     }
     addEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row);
     rowCountDelta++;
@@ -407,6 +449,22 @@ public class OrcRecordUpdater implements RecordUpdater {
     }
     return result;
   }
+  /**
+   * {@link KeyIndexBuilder} creates these
+   */
+  static AcidStats parseAcidStats(Reader reader) {
+    String statsSerialized;
+    try {
+      ByteBuffer val =
+        reader.getMetadataValue(OrcRecordUpdater.ACID_STATS)
+          .duplicate();
+      statsSerialized = utf8Decoder.decode(val).toString();
+    } catch (CharacterCodingException e) {
+      throw new IllegalArgumentException("Bad string encoding for " +
+        OrcRecordUpdater.ACID_STATS, e);
+    }
+    return new AcidStats(statsSerialized);
+  }
 
   static class KeyIndexBuilder implements OrcFile.WriterCallback {
     StringBuilder lastKey = new StringBuilder();
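
The net effect of the rowIdOffset logic, traced for a hypothetical transaction
(values illustrative only):

    // txn 100, statement 0: INSERT 3 rows
    //   -> rowIds 0,1,2 written to delta_0000100_0000100_0000
    // txn 100, statement 1: INSERT 2 rows
    //   -> findRowIdOffsetForInsert() reads AcidStats.inserts == 3 from the
    //      statement-0 bucket file, so rowIds 3,4 go to delta_0000100_0000100_0001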

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
index 0c7dd40..8cf4cc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
@@ -26,6 +26,8 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.AcidInputFormat;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapred.FileSplit;
@@ -41,7 +43,7 @@ public class OrcSplit extends FileSplit {
   private boolean hasFooter;
   private boolean isOriginal;
   private boolean hasBase;
-  private final List<Long> deltas = new ArrayList<Long>();
+  private final List<AcidInputFormat.DeltaMetaData> deltas = new ArrayList<>();
   private OrcFile.WriterVersion writerVersion;
   private long projColsUncompressedSize;
 
@@ -58,7 +60,7 @@ public class OrcSplit extends FileSplit {
 
   public OrcSplit(Path path, long offset, long length, String[] hosts,
       ReaderImpl.FileMetaInfo fileMetaInfo, boolean isOriginal, boolean hasBase,
-      List<Long> deltas, long projectedDataSize) {
+      List<AcidInputFormat.DeltaMetaData> deltas, long projectedDataSize) {
     super(path, offset, length, hosts);
     this.fileMetaInfo = fileMetaInfo;
     hasFooter = this.fileMetaInfo != null;
@@ -78,8 +80,8 @@ public class OrcSplit extends FileSplit {
         (hasFooter ? FOOTER_FLAG : 0);
     out.writeByte(flags);
     out.writeInt(deltas.size());
-    for(Long delta: deltas) {
-      out.writeLong(delta);
+    for(AcidInputFormat.DeltaMetaData delta: deltas) {
+      delta.write(out);
     }
     if (hasFooter) {
       // serialize FileMetaInfo fields
@@ -112,7 +114,9 @@ public class OrcSplit extends FileSplit {
     deltas.clear();
     int numDeltas = in.readInt();
     for(int i=0; i < numDeltas; i++) {
-      deltas.add(in.readLong());
+      AcidInputFormat.DeltaMetaData dmd = new AcidInputFormat.DeltaMetaData();
+      dmd.readFields(in);
+      deltas.add(dmd);
     }
     if (hasFooter) {
       // deserialize FileMetaInfo fields
@@ -148,7 +152,7 @@ public class OrcSplit extends FileSplit {
     return hasBase;
   }
 
-  public List<Long> getDeltas() {
+  public List<AcidInputFormat.DeltaMetaData> getDeltas() {
     return deltas;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index f8fff1a..445f606 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -52,6 +52,14 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   private DbLockManager lockMgr = null;
   private IMetaStoreClient client = null;
   private long txnId = 0;
+  /**
+   * Assigns a unique, monotonically increasing ID to each statement
+   * that is part of an open transaction.  This is used by the storage
+   * layer (see {@link org.apache.hadoop.hive.ql.io.AcidUtils#deltaSubdir(long, long, int)})
+   * to keep apart multiple writes of the same data within the same transaction.
+   * Also see {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options}.
+   */
+  private int statementId = -1;
 
   DbTxnManager() {
   }
@@ -69,6 +77,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
     init();
     try {
       txnId = client.openTxn(user);
+      statementId = 0;
       LOG.debug("Opened txn " + txnId);
       return txnId;
     } catch (TException e) {
@@ -222,7 +231,10 @@ public class DbTxnManager extends HiveTxnManagerImpl {
       return null;
     }
 
-    List<HiveLock> locks = new ArrayList<HiveLock>(1); 
+    List<HiveLock> locks = new ArrayList<HiveLock>(1);
+    if(txnId > 0) {
+      statementId++;
+    }
     LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), isBlocking, locks);
     ctx.setHiveLocks(locks);
     return lockState;
@@ -249,6 +261,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
           e);
     } finally {
       txnId = 0;
+      statementId = -1;
     }
   }
 
@@ -270,6 +283,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
           e);
     } finally {
       txnId = 0;
+      statementId = -1;
     }
   }
 
@@ -361,5 +375,9 @@ public class DbTxnManager extends HiveTxnManagerImpl {
       }
     }
   }
+  @Override
+  public int getStatementId() {
+    return statementId;
+  }
 
 }
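
For reference, the statementId lifecycle this class now maintains (a simplified trace,
not code from the patch):

    // openTxn(user)             -> txnId = N, statementId = 0
    // acquireLocks(...)         -> statementId++ for each statement while txnId > 0
    // commitTxn()/rollbackTxn() -> txnId = 0, statementId = -1 (no open transaction)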

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index 21ab8ee..1906982 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -54,6 +54,10 @@ class DummyTxnManager extends HiveTxnManagerImpl {
   }
 
   @Override
+  public int getStatementId() {
+    return 0;
+  }
+  @Override
   public HiveLockManager getLockManager() throws LockException {
     if (lockMgr == null) {
       boolean supportConcurrency =

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 2dd0c7d..6c3dc33 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -127,4 +127,7 @@ public interface HiveTxnManager {
    * @return true if this transaction manager does ACID
    */
   boolean supportsAcid();
+
+  int getStatementId();
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index b02374e..8516631 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6605,7 +6605,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       rsCtx.getNumFiles(),
       rsCtx.getTotalFiles(),
       rsCtx.getPartnCols(),
-      dpCtx);
+      dpCtx,
+      dest_path);
 
     // If this is an insert, update, or delete on an ACID table then mark that so the
     // FileSinkOperator knows how to properly write to it.

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index bb6cee5..f73b502 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -92,16 +92,21 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   // Record what type of write this is.  Default is non-ACID (ie old style).
   private AcidUtils.Operation writeType = AcidUtils.Operation.NOT_ACID;
   private long txnId = 0;  // transaction id for this operation
+  private int statementId = -1;
 
   private transient Table table;
+  private Path destPath;
 
   public FileSinkDesc() {
   }
 
+  /**
+   * @param destPath - the final destination for data
+   */
   public FileSinkDesc(final Path dirName, final TableDesc tableInfo,
       final boolean compressed, final int destTableId, final boolean multiFileSpray,
       final boolean canBeMerged, final int numFiles, final int totalFiles,
-      final ArrayList<ExprNodeDesc> partitionCols, final DynamicPartitionCtx dpCtx) {
+      final ArrayList<ExprNodeDesc> partitionCols, final DynamicPartitionCtx dpCtx, Path destPath) {
 
     this.dirName = dirName;
     this.tableInfo = tableInfo;
@@ -114,6 +119,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     this.partitionCols = partitionCols;
     this.dpCtx = dpCtx;
     this.dpSortState = DPSortState.NONE;
+    this.destPath = destPath;
   }
 
   public FileSinkDesc(final Path dirName, final TableDesc tableInfo,
@@ -135,7 +141,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   public Object clone() throws CloneNotSupportedException {
     FileSinkDesc ret = new FileSinkDesc(dirName, tableInfo, compressed,
         destTableId, multiFileSpray, canBeMerged, numFiles, totalFiles,
-        partitionCols, dpCtx);
+        partitionCols, dpCtx, destPath);
     ret.setCompressCodec(compressCodec);
     ret.setCompressType(compressType);
     ret.setGatherStats(gatherStats);
@@ -231,9 +237,6 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     return temporary;
   }
 
-  /**
-   * @param totalFiles the totalFiles to set
-   */
   public void setTemporary(boolean temporary) {
     this.temporary = temporary;
   }
@@ -438,11 +441,23 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   public void setTransactionId(long id) {
     txnId = id;
   }
-
   public long getTransactionId() {
     return txnId;
   }
 
+  public void setStatementId(int id) {
+    statementId = id;
+  }
+  /**
+   * See {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options#statementId(int)}
+   */
+  public int getStatementId() {
+    return statementId;
+  }
+  public Path getDestPath() {
+    return destPath;
+  }
+
   public Table getTable() {
     return table;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index c5f2d4d..6c77ba4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -545,7 +545,9 @@ public class CompactorMR {
             .reporter(reporter)
             .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE))
             .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE))
-            .bucket(bucket);
+            .bucket(bucket)
+            .statementId(-1);//setting statementId == -1 makes compacted delta files use
+        //delta_xxxx_yyyy format
 
         // Instantiate the underlying output format
         @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
index e400778..c6ae030 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
@@ -303,7 +303,8 @@ public class TestFileSinkOperator {
       Map<String, String> partColNames = new HashMap<String, String>(1);
       partColNames.put(PARTCOL_NAME, PARTCOL_NAME);
       dpCtx.setInputToDPCols(partColNames);
-      desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx);
+      //todo: does this need the finalDestination?
+      desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null);
     } else {
       desc = new FileSinkDesc(basePath, tableDesc, false);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
index 1e3df34..f8ded12 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
@@ -46,17 +46,23 @@ public class TestAcidUtils {
         AcidUtils.createFilename(p, options).toString());
     options.bucket(123);
     assertEquals("/tmp/00123_0",
-        AcidUtils.createFilename(p, options).toString());
+      AcidUtils.createFilename(p, options).toString());
     options.bucket(23)
         .minimumTransactionId(100)
         .maximumTransactionId(200)
         .writingBase(true)
         .setOldStyle(false);
     assertEquals("/tmp/base_0000200/bucket_00023",
-        AcidUtils.createFilename(p, options).toString());
+      AcidUtils.createFilename(p, options).toString());
     options.writingBase(false);
+    assertEquals("/tmp/delta_0000100_0000200_0000/bucket_00023",
+      AcidUtils.createFilename(p, options).toString());
+    options.statementId(-1);
     assertEquals("/tmp/delta_0000100_0000200/bucket_00023",
-        AcidUtils.createFilename(p, options).toString());
+      AcidUtils.createFilename(p, options).toString());
+    options.statementId(7);
+    assertEquals("/tmp/delta_0000100_0000200_0007/bucket_00023",
+      AcidUtils.createFilename(p, options).toString());
   }
 
   @Test
@@ -236,7 +242,6 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/delta_40_60/bucket_0", 500, new byte[0]),
         new MockFile("mock:/tbl/part1/delta_0060_60/bucket_0", 500, new byte[0]),
         new MockFile("mock:/tbl/part1/delta_052_55/bucket_0", 500, new byte[0]),
-        new MockFile("mock:/tbl/part1/delta_40_60/bucket_0", 500, new byte[0]),
         new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0]));
     Path part = new MockPath(fs, "mock:/tbl/part1");
     AcidUtils.Directory dir =
@@ -254,6 +259,45 @@ public class TestAcidUtils {
     assertEquals("mock:/tbl/part1/delta_0000063_63", delts.get(3).getPath().toString());
   }
 
+  /**
+   * Hive 1.3.0 delta dir naming scheme which supports multi-statement txns
+   * @throws Exception
+   */
+  @Test
+  public void testOverlapingDelta2() throws Exception {
+    Configuration conf = new Configuration();
+    MockFileSystem fs = new MockFileSystem(conf,
+      new MockFile("mock:/tbl/part1/delta_0000063_63_0/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_000062_62_0/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_000062_62_3/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_00061_61_0/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_40_60/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_0060_60_1/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_0060_60_4/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_0060_60_7/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_052_55/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_058_58/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0]));
+    Path part = new MockPath(fs, "mock:/tbl/part1");
+    AcidUtils.Directory dir =
+      AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:"));
+    assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString());
+    List<FileStatus> obsolete = dir.getObsolete();
+    assertEquals(5, obsolete.size());
+    assertEquals("mock:/tbl/part1/delta_052_55", obsolete.get(0).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_058_58", obsolete.get(1).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_0060_60_1", obsolete.get(2).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_0060_60_4", obsolete.get(3).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_0060_60_7", obsolete.get(4).getPath().toString());
+    List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
+    assertEquals(5, delts.size());
+    assertEquals("mock:/tbl/part1/delta_40_60", delts.get(0).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_00061_61_0", delts.get(1).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_000062_62_0", delts.get(2).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_000062_62_3", delts.get(3).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_0000063_63_0", delts.get(4).getPath().toString());
+  }
+
   @Test
   public void deltasWithOpenTxnInRead() throws Exception {
     Configuration conf = new Configuration();
@@ -268,6 +312,27 @@ public class TestAcidUtils {
     assertEquals("mock:/tbl/part1/delta_2_5", delts.get(1).getPath().toString());
   }
 
+  /**
+   * @since 1.3.0
+   * @throws Exception
+   */
+  @Test
+  public void deltasWithOpenTxnInRead2() throws Exception {
+    Configuration conf = new Configuration();
+    MockFileSystem fs = new MockFileSystem(conf,
+      new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_4_4_1/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_4_4_3/bucket_0", 500, new byte[0]),
+      new MockFile("mock:/tbl/part1/delta_101_101_1/bucket_0", 500, new byte[0]));
+    Path part = new MockPath(fs, "mock:/tbl/part1");
+    AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4"));
+    List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
+    assertEquals(2, delts.size());
+    assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_2_5", delts.get(1).getPath().toString());
+  }
+
   @Test
   public void deltasWithOpenTxnsNotInCompact() throws Exception {
     Configuration conf = new Configuration();
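
Side note for readers skimming the assertions above: the 1.3.0 delta subdirectory name is just the old delta_<min>_<max> name with a zero-padded statement id appended. A minimal standalone sketch, with the padding widths inferred from the expected strings in the createFilename assertions above (delta_0000100_0000200_0007) rather than taken from AcidUtils itself, and with a negative statementId standing in for the pre-1.3.0 format:

    // Illustrative only; mimics the names asserted above, not Hive's own code.
    public class DeltaNameSketch {
      static String deltaSubdir(long minTxn, long maxTxn, int statementId) {
        String base = String.format("delta_%07d_%07d", minTxn, maxTxn);
        return statementId < 0 ? base : base + String.format("_%04d", statementId);
      }
      public static void main(String[] args) {
        System.out.println(deltaSubdir(100, 200, -1)); // delta_0000100_0000200
        System.out.println(deltaSubdir(100, 200, 7));  // delta_0000100_0000200_0007
      }
    }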

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 56e5f9f..e96ab2a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -927,7 +928,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
             fs.getFileStatus(new Path("/a/file")), null, true,
-            new ArrayList<Long>(), true, null, null));
+            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null));
     OrcSplit result = splitter.createSplit(0, 200, null);
     assertEquals(0, result.getStart());
     assertEquals(200, result.getLength());
@@ -968,7 +969,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
             fs.getFileStatus(new Path("/a/file")), null, true,
-            new ArrayList<Long>(), true, null, null));
+            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null));
     List<OrcSplit> results = splitter.call();
     OrcSplit result = results.get(0);
     assertEquals(3, result.getStart());
@@ -990,7 +991,7 @@ public class TestInputOutputFormat {
     conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 0);
     context = new OrcInputFormat.Context(conf);
     splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
-      fs.getFileStatus(new Path("/a/file")), null, true, new ArrayList<Long>(),
+      fs.getFileStatus(new Path("/a/file")), null, true, new ArrayList<AcidInputFormat.DeltaMetaData>(),
         true, null, null));
     results = splitter.call();
     for(int i=0; i < stripeSizes.length; ++i) {
@@ -1497,7 +1498,7 @@ public class TestInputOutputFormat {
     Path partDir = new Path(conf.get("mapred.input.dir"));
     OrcRecordUpdater writer = new OrcRecordUpdater(partDir,
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
-            .writingBase(true).bucket(0).inspector(inspector));
+            .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir));
     for(int i=0; i < 100; ++i) {
       BigRow row = new BigRow(i);
       writer.insert(10, row);
@@ -1648,7 +1649,7 @@ public class TestInputOutputFormat {
     // write a base file in partition 0
     OrcRecordUpdater writer = new OrcRecordUpdater(partDir[0],
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
-            .writingBase(true).bucket(0).inspector(inspector));
+            .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir[0]));
     for(int i=0; i < 10; ++i) {
       writer.insert(10, new MyRow(i, 2 * i));
     }
@@ -1661,7 +1662,7 @@ public class TestInputOutputFormat {
     // write a delta file in partition 0
     writer = new OrcRecordUpdater(partDir[0],
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
-            .writingBase(true).bucket(1).inspector(inspector));
+            .writingBase(true).bucket(1).inspector(inspector).finalDestination(partDir[0]));
     for(int i=10; i < 20; ++i) {
       writer.insert(10, new MyRow(i, 2*i));
     }
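
Side note: the recurring edit in this file is a single trailing builder call; every AcidOutputFormat.Options chain now supplies the final destination directory. The shape of the call, lifted from the hunks above (conf, inspector and partDir stand for objects the tests construct earlier):

    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .maximumTransactionId(10)
        .writingBase(true)
        .bucket(0)
        .inspector(inspector)
        .finalDestination(partDir);   // the one new call in this patch
    OrcRecordUpdater writer = new OrcRecordUpdater(partDir, options);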

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
index 921e954..39f71f1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
@@ -62,12 +62,12 @@ import static org.junit.Assert.assertNull;
 public class TestOrcRawRecordMerger {
 
   private static final Log LOG = LogFactory.getLog(TestOrcRawRecordMerger.class);
-
+//todo: why is statementId -1?
   @Test
   public void testOrdering() throws Exception {
     ReaderKey left = new ReaderKey(100, 200, 1200, 300);
     ReaderKey right = new ReaderKey();
-    right.setValues(100, 200, 1000, 200);
+    right.setValues(100, 200, 1000, 200,1);
     assertTrue(right.compareTo(left) < 0);
     assertTrue(left.compareTo(right) > 0);
     assertEquals(false, left.equals(right));
@@ -76,16 +76,16 @@ public class TestOrcRawRecordMerger {
     assertEquals(true, right.equals(left));
     right.setRowId(2000);
     assertTrue(right.compareTo(left) > 0);
-    left.setValues(1, 2, 3, 4);
-    right.setValues(100, 2, 3, 4);
+    left.setValues(1, 2, 3, 4,-1);
+    right.setValues(100, 2, 3, 4,-1);
     assertTrue(left.compareTo(right) < 0);
     assertTrue(right.compareTo(left) > 0);
-    left.setValues(1, 2, 3, 4);
-    right.setValues(1, 100, 3, 4);
+    left.setValues(1, 2, 3, 4,-1);
+    right.setValues(1, 100, 3, 4,-1);
     assertTrue(left.compareTo(right) < 0);
     assertTrue(right.compareTo(left) > 0);
-    left.setValues(1, 2, 3, 100);
-    right.setValues(1, 2, 3, 4);
+    left.setValues(1, 2, 3, 100,-1);
+    right.setValues(1, 2, 3, 4,-1);
     assertTrue(left.compareTo(right) < 0);
     assertTrue(right.compareTo(left) > 0);
 
@@ -177,7 +177,7 @@ public class TestOrcRawRecordMerger {
     RecordIdentifier minKey = new RecordIdentifier(10, 20, 30);
     RecordIdentifier maxKey = new RecordIdentifier(40, 50, 60);
     ReaderPair pair = new ReaderPair(key, reader, 20, minKey, maxKey,
-        new Reader.Options());
+        new Reader.Options(), 0);
     RecordReader recordReader = pair.recordReader;
     assertEquals(10, key.getTransactionId());
     assertEquals(20, key.getBucketId());
@@ -203,7 +203,7 @@ public class TestOrcRawRecordMerger {
     Reader reader = createMockReader();
 
     ReaderPair pair = new ReaderPair(key, reader, 20, null, null,
-        new Reader.Options());
+        new Reader.Options(), 0);
     RecordReader recordReader = pair.recordReader;
     assertEquals(10, key.getTransactionId());
     assertEquals(20, key.getBucketId());
@@ -489,7 +489,7 @@ public class TestOrcRawRecordMerger {
     // write the empty base
     AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
         .inspector(inspector).bucket(BUCKET).writingBase(true)
-        .maximumTransactionId(100);
+        .maximumTransactionId(100).finalDestination(root);
     of.getRecordUpdater(root, options).close(false);
 
     ValidTxnList txnList = new ValidReadTxnList("200:");
@@ -515,6 +515,10 @@ public class TestOrcRawRecordMerger {
    */
   @Test
   public void testNewBaseAndDelta() throws Exception {
+    testNewBaseAndDelta(false);
+    testNewBaseAndDelta(true);
+  }
+  private void testNewBaseAndDelta(boolean use130Format) throws Exception {
     final int BUCKET = 10;
     String[] values = new String[]{"first", "second", "third", "fourth",
                                    "fifth", "sixth", "seventh", "eighth",
@@ -532,7 +536,10 @@ public class TestOrcRawRecordMerger {
 
     // write the base
     AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
-        .inspector(inspector).bucket(BUCKET);
+        .inspector(inspector).bucket(BUCKET).finalDestination(root);
+    if(!use130Format) {
+      options.statementId(-1);
+    }
     RecordUpdater ru = of.getRecordUpdater(root,
         options.writingBase(true).maximumTransactionId(100));
     for(String v: values) {
@@ -554,7 +561,8 @@ public class TestOrcRawRecordMerger {
     AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList);
 
     assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory());
-    assertEquals(new Path(root, "delta_0000200_0000200"),
+    assertEquals(new Path(root, use130Format ?
+        AcidUtils.deltaSubdir(200,200,0) : AcidUtils.deltaSubdir(200,200)),
         directory.getCurrentDirectories().get(0).getPath());
 
     Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(),
@@ -829,7 +837,7 @@ public class TestOrcRawRecordMerger {
     // write a delta
     AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
         .writingBase(false).minimumTransactionId(1).maximumTransactionId(1)
-        .bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5);
+        .bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5).finalDestination(root);
     RecordUpdater ru = of.getRecordUpdater(root, options);
     values = new String[]{"0.0", null, null, "1.1", null, null, null,
         "ignore.7"};
@@ -920,6 +928,7 @@ public class TestOrcRawRecordMerger {
     options.orcOptions(OrcFile.writerOptions(conf)
       .stripeSize(1).blockPadding(false).compress(CompressionKind.NONE)
       .memory(mgr));
+    options.finalDestination(root);
     RecordUpdater ru = of.getRecordUpdater(root, options);
     String[] values= new String[]{"ignore.1", "0.1", "ignore.2", "ignore.3",
         "2.0", "2.1", "3.0", "ignore.4", "ignore.5", "ignore.6"};
@@ -1004,7 +1013,8 @@ public class TestOrcRawRecordMerger {
     AcidOutputFormat.Options options =
         new AcidOutputFormat.Options(conf)
             .bucket(BUCKET).inspector(inspector).filesystem(fs)
-            .writingBase(false).minimumTransactionId(1).maximumTransactionId(1);
+            .writingBase(false).minimumTransactionId(1).maximumTransactionId(1)
+          .finalDestination(root);
     RecordUpdater ru = of.getRecordUpdater(root, options);
     String[] values = new String[]{"a", "b", "c", "d", "e"};
     for(int i=0; i < values.length; ++i) {
@@ -1047,6 +1057,14 @@ public class TestOrcRawRecordMerger {
    */
   @Test
   public void testRecordReaderIncompleteDelta() throws Exception {
+    testRecordReaderIncompleteDelta(false);
+    testRecordReaderIncompleteDelta(true);
+  }
+  /**
+   * 
+   * @param use130Format true means use delta_0001_0001_0000 format, else delta_0001_0001
+   */
+  private void testRecordReaderIncompleteDelta(boolean use130Format) throws Exception {
     final int BUCKET = 1;
     Configuration conf = new Configuration();
     OrcOutputFormat of = new OrcOutputFormat();
@@ -1063,7 +1081,10 @@ public class TestOrcRawRecordMerger {
     AcidOutputFormat.Options options =
         new AcidOutputFormat.Options(conf)
             .writingBase(true).minimumTransactionId(0).maximumTransactionId(0)
-            .bucket(BUCKET).inspector(inspector).filesystem(fs);
+            .bucket(BUCKET).inspector(inspector).filesystem(fs).finalDestination(root);
+    if(!use130Format) {
+      options.statementId(-1);
+    }
     RecordUpdater ru = of.getRecordUpdater(root, options);
     String[] values= new String[]{"1", "2", "3", "4", "5"};
     for(int i=0; i < values.length; ++i) {
@@ -1110,8 +1131,8 @@ public class TestOrcRawRecordMerger {
     splits = inf.getSplits(job, 1);
     assertEquals(2, splits.length);
     rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
-    Path sideFile = new Path(root +
-        "/delta_0000010_0000019/bucket_00001_flush_length");
+    Path sideFile = new Path(root + "/" + (use130Format ? AcidUtils.deltaSubdir(10,19,0) :
+      AcidUtils.deltaSubdir(10,19)) + "/bucket_00001_flush_length");
     assertEquals(true, fs.exists(sideFile));
     assertEquals(24, fs.getFileStatus(sideFile).getLen());
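
Side note on the widened setValues calls in testOrdering above: the fifth argument is the new statementId, with -1 apparently meaning "no statement id" to match pre-1.3.0 deltas. A hedged reading of the first assertion pair (the parameter names below are guesses from the surrounding getters, not from the ReaderKey source):

    ReaderKey left = new ReaderKey(100, 200, 1200, 300);
    ReaderKey right = new ReaderKey();
    // guessed order: (originalTransactionId, bucketId, rowId,
    //                 currentTransactionId, statementId)
    right.setValues(100, 200, 1000, 200, 1);
    // same txn and bucket, smaller rowId, so right sorts before left
    assert right.compareTo(left) < 0;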
 

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
index 22bd4b9..22030b4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
@@ -97,7 +97,8 @@ public class TestOrcRecordUpdater {
         .minimumTransactionId(10)
         .maximumTransactionId(19)
         .inspector(inspector)
-        .reporter(Reporter.NULL);
+        .reporter(Reporter.NULL)
+        .finalDestination(root);
     RecordUpdater updater = new OrcRecordUpdater(root, options);
     updater.insert(11, new MyRow("first"));
     updater.insert(11, new MyRow("second"));
@@ -197,7 +198,8 @@ public class TestOrcRecordUpdater {
         .maximumTransactionId(100)
         .inspector(inspector)
         .reporter(Reporter.NULL)
-        .recordIdColumn(1);
+        .recordIdColumn(1)
+        .finalDestination(root);
     RecordUpdater updater = new OrcRecordUpdater(root, options);
     updater.update(100, new MyRow("update", 30, 10, bucket));
     updater.delete(100, new MyRow("", 60, 40, bucket));

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
index 671e122..21adc9d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
@@ -241,7 +241,7 @@ public abstract class CompactorTest {
     return sd;
   }
 
-  // I can't do this with @Before because I want to be able to control when the thead starts
+  // I can't do this with @Before because I want to be able to control when the thread starts
   private void startThread(char type, boolean stopAfterOne) throws Exception {
     startThread(type, stopAfterOne, new AtomicBoolean());
   }
@@ -284,7 +284,7 @@ public abstract class CompactorTest {
     switch (type) {
       case BASE: filename = "base_" + maxTxn; break;
       case LENGTH_FILE: // Fall through to delta
-      case DELTA: filename = "delta_" + minTxn + "_" + maxTxn; break;
+      case DELTA: filename = makeDeltaDirName(minTxn, maxTxn); break;
       case LEGACY: break; // handled below
     }
 
@@ -508,5 +508,21 @@ public abstract class CompactorTest {
     }
   }
 
+  /**
+   * in Hive 1.3.0 delta file names changed to delta_xxxx_yyyy_zzzz; prior to that
+   * the name was delta_xxxx_yyyy.  We want to run compaction tests such that both formats
+   * are used since new (1.3) code has to be able to read old files.
+   */
+  abstract boolean useHive130DeltaDirName();
 
+  String makeDeltaDirName(long minTxnId, long maxTxnId) {
+    return useHive130DeltaDirName() ?
+      AcidUtils.deltaSubdir(minTxnId, maxTxnId, 0) : AcidUtils.deltaSubdir(minTxnId, maxTxnId);
+  }
+  /**
+   * delta dir name after compaction
+   */
+  String makeDeltaDirNameCompacted(long minTxnId, long maxTxnId) {
+    return AcidUtils.deltaSubdir(minTxnId, maxTxnId);
+  }
 }
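
Side note: subclasses pick the format by overriding useHive130DeltaDirName(). Assuming AcidUtils.deltaSubdir pads the way the TestAcidUtils assertions show, the two helpers yield:

    makeDeltaDirName(21, 24);          // "delta_0000021_0000024_0000" when useHive130DeltaDirName() is true
                                       // "delta_0000021_0000024"      when it is false
    makeDeltaDirNameCompacted(21, 24); // always "delta_0000021_0000024" (compaction writes the two-part name)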

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
index ffdbb9a..0db732c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
@@ -139,7 +139,7 @@ public class TestCleaner extends CompactorTest {
     boolean sawBase = false, sawDelta = false;
     for (Path p : paths) {
       if (p.getName().equals("base_20")) sawBase = true;
-      else if (p.getName().equals("delta_21_24")) sawDelta = true;
+      else if (p.getName().equals(makeDeltaDirName(21, 24))) sawDelta = true;
       else Assert.fail("Unexpected file " + p.getName());
     }
     Assert.assertTrue(sawBase);
@@ -177,7 +177,7 @@ public class TestCleaner extends CompactorTest {
     boolean sawBase = false, sawDelta = false;
     for (Path path : paths) {
       if (path.getName().equals("base_20")) sawBase = true;
-      else if (path.getName().equals("delta_21_24")) sawDelta = true;
+      else if (path.getName().equals(makeDeltaDirNameCompacted(21, 24))) sawDelta = true;
       else Assert.fail("Unexpected file " + path.getName());
     }
     Assert.assertTrue(sawBase);
@@ -480,4 +480,8 @@ public class TestCleaner extends CompactorTest {
     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
     Assert.assertEquals(0, rsp.getCompactsSize());
   }
+  @Override
+  boolean useHive130DeltaDirName() {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java
new file mode 100644
index 0000000..c637dd1
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java
@@ -0,0 +1,14 @@
+package org.apache.hadoop.hive.ql.txn.compactor;
+
+/**
+ * Same as TestCleaner but tests delta file names in Hive 1.3.0 format 
+ */
+public class TestCleaner2 extends TestCleaner {
+  public TestCleaner2() throws Exception {
+    super();
+  }
+  @Override
+  boolean useHive130DeltaDirName() {
+    return true;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
index 00b13de..0b0b1da 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
@@ -713,5 +713,9 @@ public class TestInitiator extends CompactorTest {
     List<ShowCompactResponseElement> compacts = rsp.getCompacts();
     Assert.assertEquals(0, compacts.size());
   }
+  @Override
+  boolean useHive130DeltaDirName() {
+    return false;
+  }
 
 }


[37/50] [abbrv] hive git commit: HIVE-11251: CBO (Calcite Return Path): Extending ExprNodeConverter to consider additional types (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11251: CBO (Calcite Return Path): Extending ExprNodeConverter to consider additional types (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8662d9da
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8662d9da
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8662d9da

Branch: refs/heads/beeline-cli
Commit: 8662d9dae3da1cdbec3ac8c2c4f7d9f12ae5f1f0
Parents: af4aeab
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Jul 15 07:06:14 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed Jul 15 18:31:59 2015 +0100

----------------------------------------------------------------------
 .../calcite/translator/ExprNodeConverter.java      | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8662d9da/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index bcce74a..4f0db03 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
+import java.math.BigDecimal;
 import java.sql.Date;
 import java.sql.Timestamp;
 import java.util.ArrayList;
@@ -43,6 +44,8 @@ import org.apache.calcite.sql.type.SqlTypeUtil;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.type.HiveChar;
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor;
@@ -199,6 +202,7 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
       return new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, Long.valueOf(((Number) literal
           .getValue3()).longValue()));
     case FLOAT:
+    case REAL:
       return new ExprNodeConstantDesc(TypeInfoFactory.floatTypeInfo,
           Float.valueOf(((Number) literal.getValue3()).floatValue()));
     case DOUBLE:
@@ -207,6 +211,7 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
     case DATE:
       return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo,
         new Date(((Calendar)literal.getValue()).getTimeInMillis()));
+    case TIME:
     case TIMESTAMP: {
       Object value = literal.getValue3();
       if (value instanceof Long) {
@@ -225,6 +230,18 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
     case CHAR:
       return new ExprNodeConstantDesc(TypeInfoFactory.getCharTypeInfo(lType.getPrecision()),
           new HiveChar((String) literal.getValue3(), lType.getPrecision()));
+    case INTERVAL_YEAR_MONTH: {
+      BigDecimal monthsBd = (BigDecimal) literal.getValue();
+      return new ExprNodeConstantDesc(TypeInfoFactory.intervalYearMonthTypeInfo,
+              new HiveIntervalYearMonth(monthsBd.intValue()));
+    }
+    case INTERVAL_DAY_TIME: {
+      BigDecimal millisBd = (BigDecimal) literal.getValue();
+      // Calcite literal is in millis, we need to convert to seconds
+      BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
+      return new ExprNodeConstantDesc(TypeInfoFactory.intervalDayTimeTypeInfo,
+              new HiveIntervalDayTime(secsBd));
+    }
     case OTHER:
     default:
       return new ExprNodeConstantDesc(TypeInfoFactory.voidTypeInfo, literal.getValue3());
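
Side note: the INTERVAL_DAY_TIME branch is the only one doing arithmetic; Calcite hands the interval back as a BigDecimal count of milliseconds, while HiveIntervalDayTime takes seconds, hence the divide by 1000. A standalone check of that conversion (the sample value is hypothetical):

    import java.math.BigDecimal;

    public class IntervalMillisToSeconds {
      public static void main(String[] args) {
        // hypothetical literal: 1 day 2:03:04.5, expressed in milliseconds
        BigDecimal millisBd = BigDecimal.valueOf(93784500L);
        // dividing by 1000 always terminates, so no rounding mode is needed
        BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
        System.out.println(secsBd); // prints 93784.5
      }
    }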


[20/50] [abbrv] hive git commit: HIVE-11225: Running all Hive UTs or itests executes only small subset of tests(Ferdinand Xu, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by xu...@apache.org.
HIVE-11225: Running all Hive UTs or itests executes only small subset of tests(Ferdinand Xu, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5c94bda9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5c94bda9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5c94bda9

Branch: refs/heads/beeline-cli
Commit: 5c94bda99399d7861ba2c83de707305655231925
Parents: ad1cb15
Author: Ferdinand Xu <ch...@intel.com>
Authored: Sun Jul 12 21:50:20 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Sun Jul 12 21:50:20 2015 -0400

----------------------------------------------------------------------
 pom.xml | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5c94bda9/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f2cb761..1abf738 100644
--- a/pom.xml
+++ b/pom.xml
@@ -826,9 +826,8 @@
             <exclude>**/ql/exec/vector/udf/legacy/*.java</exclude>
             <exclude>**/ql/exec/vector/udf/generic/*.java</exclude>
             <exclude>**/TestHiveServer2Concurrency.java</exclude>
-            <exclude>**/TestHiveMetaStore.java</exclude>
             <exclude>${test.excludes.additional}</exclude>
-            <exclude>%regex[${skip.spark.files}]</exclude>
+            <exclude>${skip.spark.files}</exclude>
           </excludes>
           <redirectTestOutputToFile>true</redirectTestOutputToFile>
           <reuseForks>false</reuseForks>
@@ -1219,7 +1218,9 @@
 	</property>
       </activation>
       <properties>
-        <skip.spark.files>.*[TestSparkSessionManagerImpl|TestMultiSessionsHS2WithLocalClusterSpark|TestJdbcWithLocalClusterSpark].class</skip.spark.files>
+        <skip.spark.files>
+          **/ql/exec/spark/session/TestSparkSessionManagerImpl.java,**/TestMultiSessionsHS2WithLocalClusterSpark.java,**/TestJdbcWithLocalClusterSpark.java
+        </skip.spark.files>
       </properties>
     </profile>
   </profiles>


[29/50] [abbrv] hive git commit: HIVE-10882 : CBO: Calcite Operator To Hive Operator (Calcite Return Path) empty filtersMap of join operator causes wrong results (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-10882 : CBO: Calcite Operator To Hive Operator (Calcite Return Path) empty filtersMap of join operator causes wrong results (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5363af9a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5363af9a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5363af9a

Branch: refs/heads/beeline-cli
Commit: 5363af9aa1d98f82bdef4861b533314dc19a77d0
Parents: 2620ebb
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Mon Jul 13 05:26:49 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Jul 13 15:31:37 2015 -0700

----------------------------------------------------------------------
 .../ql/optimizer/calcite/HiveRelOptUtil.java    |   37 -
 .../calcite/reloperators/HiveJoin.java          |   14 +
 .../calcite/reloperators/HiveMultiJoin.java     |   28 +-
 .../calcite/reloperators/HiveSemiJoin.java      |   57 +-
 .../calcite/rules/HiveJoinToMultiJoinRule.java  |   50 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |    3 +-
 .../calcite/translator/HiveOpConverter.java     |  122 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    4 +-
 .../queries/clientpositive/fouter_join_ppr.q    |   73 +
 .../clientpositive/fouter_join_ppr.q.out        | 1694 ++++++++++++++++++
 10 files changed, 1974 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index ab793f1..5a5954d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -226,43 +226,6 @@ public class HiveRelOptUtil extends RelOptUtil {
         }
       }
 
-//      if ((rangeOp == null)
-//          && ((leftKey == null) || (rightKey == null))) {
-//        // no equality join keys found yet:
-//        // try transforming the condition to
-//        // equality "join" conditions, e.g.
-//        //     f(LHS) > 0 ===> ( f(LHS) > 0 ) = TRUE,
-//        // and make the RHS produce TRUE, but only if we're strictly
-//        // looking for equi-joins
-//        final ImmutableBitSet projRefs = InputFinder.bits(condition);
-//        leftKey = null;
-//        rightKey = null;
-//
-//        boolean foundInput = false;
-//        for (int i = 0; i < inputs.size() && !foundInput; i++) {
-//          final int lowerLimit = inputsRange[i].nextSetBit(0);
-//          final int upperLimit = inputsRange[i].length();
-//          if (projRefs.nextSetBit(lowerLimit) < upperLimit) {
-//            leftInput = i;
-//            leftFields = inputs.get(leftInput).getRowType().getFieldList();
-//
-//            leftKey = condition.accept(
-//                new RelOptUtil.RexInputConverter(
-//                    rexBuilder,
-//                    leftFields,
-//                    leftFields,
-//                    adjustments));
-//
-//            rightKey = rexBuilder.makeLiteral(true);
-//
-//            // effectively performing an equality comparison
-//            kind = SqlKind.EQUALS;
-//
-//            foundInput = true;
-//          }
-//        }
-//      }
-
       if ((leftKey != null) && (rightKey != null)) {
         // found suitable join keys
         // add them to key list, ensuring that if there is a

http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
index 6814df6..ffd3196 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
@@ -43,6 +43,7 @@ import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCostModel.JoinAlgorithm;
@@ -60,6 +61,7 @@ public class HiveJoin extends Join implements HiveRelNode {
   }
 
   private final boolean leftSemiJoin;
+  private final RexNode joinFilter;
   private final JoinPredicateInfo joinPredInfo;
   private JoinAlgorithm joinAlgorithm;
   private RelOptCost joinCost;
@@ -82,6 +84,14 @@ public class HiveJoin extends Join implements HiveRelNode {
       JoinAlgorithm joinAlgo, boolean leftSemiJoin) throws InvalidRelException, CalciteSemanticException {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster), left, right, condition, joinType,
         variablesStopped);
+    final List<RelDataTypeField> systemFieldList = ImmutableList.of();
+    List<List<RexNode>> joinKeyExprs = new ArrayList<List<RexNode>>();
+    List<Integer> filterNulls = new ArrayList<Integer>();
+    for (int i=0; i<this.getInputs().size(); i++) {
+      joinKeyExprs.add(new ArrayList<RexNode>());
+    }
+    this.joinFilter = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, this.getInputs(),
+            this.getCondition(), joinKeyExprs, filterNulls, null);
     this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
     this.joinAlgorithm = joinAlgo;
     this.leftSemiJoin = leftSemiJoin;
@@ -105,6 +115,10 @@ public class HiveJoin extends Join implements HiveRelNode {
     }
   }
 
+  public RexNode getJoinFilter() {
+    return joinFilter;
+  }
+
   public JoinPredicateInfo getJoinPredicateInfo() {
     return joinPredInfo;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
index 7a43f29..660f01d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
@@ -49,6 +49,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
   private final RelDataType rowType;
   private final ImmutableList<Pair<Integer,Integer>> joinInputs;
   private final ImmutableList<JoinRelType> joinTypes;
+  private final ImmutableList<RexNode> filters;
 
   private final boolean outerJoin;
   private final JoinPredicateInfo joinPredInfo;
@@ -59,30 +60,34 @@ public final class HiveMultiJoin extends AbstractRelNode {
    *
    * @param cluster               cluster that join belongs to
    * @param inputs                inputs into this multi-join
-   * @param condition            join filter applicable to this join node
+   * @param condition             join filter applicable to this join node
    * @param rowType               row type of the join result of this node
-   * @param joinInputs
+   * @param joinInputs            
    * @param joinTypes             the join type corresponding to each input; if
    *                              an input is null-generating in a left or right
    *                              outer join, the entry indicates the type of
    *                              outer join; otherwise, the entry is set to
    *                              INNER
+   * @param filters               filters associated with each join
+   *                              input
    */
   public HiveMultiJoin(
       RelOptCluster cluster,
       List<RelNode> inputs,
-      RexNode joinFilter,
+      RexNode condition,
       RelDataType rowType,
       List<Pair<Integer,Integer>> joinInputs,
-      List<JoinRelType> joinTypes) {
+      List<JoinRelType> joinTypes,
+      List<RexNode> filters) {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster));
     this.inputs = Lists.newArrayList(inputs);
-    this.condition = joinFilter;
+    this.condition = condition;
     this.rowType = rowType;
 
     assert joinInputs.size() == joinTypes.size();
     this.joinInputs = ImmutableList.copyOf(joinInputs);
     this.joinTypes = ImmutableList.copyOf(joinTypes);
+    this.filters = ImmutableList.copyOf(filters);
     this.outerJoin = containsOuter();
 
     try {
@@ -107,7 +112,8 @@ public final class HiveMultiJoin extends AbstractRelNode {
         condition,
         rowType,
         joinInputs,
-        joinTypes);
+        joinTypes,
+        filters);
   }
 
   @Override
@@ -156,7 +162,8 @@ public final class HiveMultiJoin extends AbstractRelNode {
         joinFilter,
         rowType,
         joinInputs,
-        joinTypes);
+        joinTypes,
+        filters);
   }
 
   /**
@@ -188,6 +195,13 @@ public final class HiveMultiJoin extends AbstractRelNode {
   }
 
   /**
+   * @return join conditions filters
+   */
+  public List<RexNode> getJoinFilters() {
+    return filters;
+  }
+
+  /**
    * @return the join predicate information
    */
   public JoinPredicateInfo getJoinPredicateInfo() {
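
Side note: after this change the three per-join lists stay index-aligned -- entry i of joinInputs, joinTypes and filters together describe one merged join. A hedged construction sketch (cluster, inputs, condition, rowType and the three lists are assumed to come from the surrounding planner code, as in HiveJoinToMultiJoinRule below):

    HiveMultiJoin multiJoin = new HiveMultiJoin(
        cluster,      // RelOptCluster
        inputs,       // List<RelNode> being combined
        condition,    // overall join condition (RexNode)
        rowType,      // RelDataType of the join output
        joinInputs,   // List<Pair<Integer,Integer>>, one pair per join
        joinTypes,    // List<JoinRelType>, same length
        filters);     // List<RexNode> residual (non-equi) predicates, same length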

http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
index dd1691c..af82822 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
@@ -17,39 +17,86 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.InvalidRelException;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.JoinInfo;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.RelFactories.SemiJoinFactory;
 import org.apache.calcite.rel.core.SemiJoin;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.ImmutableIntList;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
+
+import com.google.common.collect.ImmutableList;
 
 public class HiveSemiJoin extends SemiJoin implements HiveRelNode {
 
   public static final SemiJoinFactory HIVE_SEMIJOIN_FACTORY = new HiveSemiJoinFactoryImpl();
 
-  public HiveSemiJoin(RelOptCluster cluster,
+  private final RexNode joinFilter;
+
+
+  public static HiveSemiJoin getSemiJoin(
+          RelOptCluster cluster,
           RelTraitSet traitSet,
           RelNode left,
           RelNode right,
           RexNode condition,
           ImmutableIntList leftKeys,
           ImmutableIntList rightKeys) {
+    try {
+      HiveSemiJoin semiJoin = new HiveSemiJoin(cluster, traitSet, left, right,
+              condition, leftKeys, rightKeys);
+      return semiJoin;
+    } catch (InvalidRelException | CalciteSemanticException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  protected HiveSemiJoin(RelOptCluster cluster,
+          RelTraitSet traitSet,
+          RelNode left,
+          RelNode right,
+          RexNode condition,
+          ImmutableIntList leftKeys,
+          ImmutableIntList rightKeys) throws InvalidRelException, CalciteSemanticException {
     super(cluster, traitSet, left, right, condition, leftKeys, rightKeys);
+    final List<RelDataTypeField> systemFieldList = ImmutableList.of();
+    List<List<RexNode>> joinKeyExprs = new ArrayList<List<RexNode>>();
+    List<Integer> filterNulls = new ArrayList<Integer>();
+    for (int i=0; i<this.getInputs().size(); i++) {
+      joinKeyExprs.add(new ArrayList<RexNode>());
+    }
+    this.joinFilter = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, this.getInputs(),
+            this.getCondition(), joinKeyExprs, filterNulls, null);
+  }
+
+  public RexNode getJoinFilter() {
+    return joinFilter;
   }
 
   @Override
   public SemiJoin copy(RelTraitSet traitSet, RexNode condition,
           RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) {
-    final JoinInfo joinInfo = JoinInfo.of(left, right, condition);
-    return new HiveSemiJoin(getCluster(), traitSet, left, right, condition,
-            joinInfo.leftKeys, joinInfo.rightKeys);
+    try {
+      final JoinInfo joinInfo = JoinInfo.of(left, right, condition);
+      return new HiveSemiJoin(getCluster(), traitSet, left, right, condition,
+              joinInfo.leftKeys, joinInfo.rightKeys);
+    } catch (InvalidRelException | CalciteSemanticException e) {
+      // Semantic error not possible. Must be a bug. Convert to
+      // internal error.
+      throw new AssertionError(e);
+    }
   }
 
   @Override
@@ -72,7 +119,7 @@ public class HiveSemiJoin extends SemiJoin implements HiveRelNode {
             RexNode condition) {
       final JoinInfo joinInfo = JoinInfo.of(left, right, condition);
       final RelOptCluster cluster = left.getCluster();
-      return new HiveSemiJoin(cluster, left.getTraitSet(), left, right, condition,
+      return getSemiJoin(cluster, left.getTraitSet(), left, right, condition,
           joinInfo.leftKeys, joinInfo.rightKeys);
     }
   }
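
Side note: getSemiJoin exists so that callers such as the SemiJoinFactory above need not handle the checked InvalidRelException/CalciteSemanticException thrown by the constructor. Typical use, mirroring the factory implementation (left, right and condition assumed from the caller):

    final JoinInfo joinInfo = JoinInfo.of(left, right, condition);
    HiveSemiJoin semiJoin = HiveSemiJoin.getSemiJoin(
        left.getCluster(), left.getTraitSet(), left, right, condition,
        joinInfo.leftKeys, joinInfo.rightKeys);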

http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
index a0144f3..d0a29a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
@@ -124,25 +124,29 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
 
     // We check whether the join can be combined with any of its children
     final List<RelNode> newInputs = Lists.newArrayList();
-    final List<RexNode> newJoinFilters = Lists.newArrayList();
-    newJoinFilters.add(join.getCondition());
-    final List<Pair<Pair<Integer,Integer>, JoinRelType>> joinSpecs = Lists.newArrayList();
+    final List<RexNode> newJoinCondition = Lists.newArrayList();
+    final List<Pair<Integer,Integer>> joinInputs = Lists.newArrayList();
+    final List<JoinRelType> joinTypes = Lists.newArrayList();
+    final List<RexNode> joinFilters = Lists.newArrayList();
 
     // Left child
-    if (left instanceof Join || left instanceof HiveMultiJoin) {
+    if (left instanceof HiveJoin || left instanceof HiveMultiJoin) {
       final RexNode leftCondition;
       final List<Pair<Integer,Integer>> leftJoinInputs;
       final List<JoinRelType> leftJoinTypes;
-      if (left instanceof Join) {
-        Join hj = (Join) left;
+      final List<RexNode> leftJoinFilters;
+      if (left instanceof HiveJoin) {
+        HiveJoin hj = (HiveJoin) left;
         leftCondition = hj.getCondition();
         leftJoinInputs = ImmutableList.of(Pair.of(0, 1));
         leftJoinTypes = ImmutableList.of(hj.getJoinType());
+        leftJoinFilters = ImmutableList.of(hj.getJoinFilter());
       } else {
         HiveMultiJoin hmj = (HiveMultiJoin) left;
         leftCondition = hmj.getCondition();
         leftJoinInputs = hmj.getJoinInputs();
         leftJoinTypes = hmj.getJoinTypes();
+        leftJoinFilters = hmj.getJoinFilters();
       }
 
       boolean combinable;
@@ -154,9 +158,11 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
         combinable = false;
       }
       if (combinable) {
-        newJoinFilters.add(leftCondition);
+        newJoinCondition.add(leftCondition);
         for (int i = 0; i < leftJoinInputs.size(); i++) {
-          joinSpecs.add(Pair.of(leftJoinInputs.get(i), leftJoinTypes.get(i)));
+          joinInputs.add(leftJoinInputs.get(i));
+          joinTypes.add(leftJoinTypes.get(i));
+          joinFilters.add(leftJoinFilters.get(i));
         }
         newInputs.addAll(left.getInputs());
       } else { // The join operation in the child is not on the same keys
@@ -171,7 +177,8 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
     newInputs.add(right);
 
     // If we cannot combine any of the children, we bail out
-    if (newJoinFilters.size() == 1) {
+    newJoinCondition.add(join.getCondition());
+    if (newJoinCondition.size() == 1) {
       return null;
     }
 
@@ -181,18 +188,14 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
     for (int i=0; i<newInputs.size(); i++) {
       joinKeyExprs.add(new ArrayList<RexNode>());
     }
-    RexNode otherCondition;
+    RexNode filters;
     try {
-      otherCondition = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, newInputs, join.getCondition(),
-          joinKeyExprs, filterNulls, null);
+      filters = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, newInputs,
+          join.getCondition(), joinKeyExprs, filterNulls, null);
     } catch (CalciteSemanticException e) {
         LOG.trace("Failed to merge joins", e);
         return null;
     }
-    // If there are remaining parts in the condition, we bail out
-    if (!otherCondition.isAlwaysTrue()) {
-      return null;
-    }
     ImmutableBitSet.Builder keysInInputsBuilder = ImmutableBitSet.builder();
     for (int i=0; i<newInputs.size(); i++) {
       List<RexNode> partialCondition = joinKeyExprs.get(i);
@@ -214,25 +217,30 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
     if (join.getJoinType() != JoinRelType.INNER) {
       int leftInput = keysInInputs.nextSetBit(0);
       int rightInput = keysInInputs.nextSetBit(numberLeftInputs);
-      joinSpecs.add(Pair.of(Pair.of(leftInput, rightInput), join.getJoinType()));
+      joinInputs.add(Pair.of(leftInput, rightInput));
+      joinTypes.add(join.getJoinType());
+      joinFilters.add(filters);
     } else {
       for (int i : leftReferencedInputs) {
         for (int j : rightReferencedInputs) {
-          joinSpecs.add(Pair.of(Pair.of(i, j), join.getJoinType()));
+          joinInputs.add(Pair.of(i, j));
+          joinTypes.add(join.getJoinType());
+          joinFilters.add(filters);
         }
       }
     }
 
     // We can now create a multijoin operator
     RexNode newCondition = RexUtil.flatten(rexBuilder,
-            RexUtil.composeConjunction(rexBuilder, newJoinFilters, false));
+            RexUtil.composeConjunction(rexBuilder, newJoinCondition, false));
     return new HiveMultiJoin(
             join.getCluster(),
             newInputs,
             newCondition,
             join.getRowType(),
-            Pair.left(joinSpecs),
-            Pair.right(joinSpecs));
+            joinInputs,
+            joinTypes,
+            joinFilters);
   }
 
   private static boolean isCombinablePredicate(Join join,

http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
index f72f67f..4144674 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
@@ -151,7 +151,8 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
             newConditionExpr,
             newRowType,
             join.getJoinInputs(),
-            join.getJoinTypes());
+            join.getJoinTypes(),
+            join.getJoinFilters());
 
     return new TrimResult(newJoin, mapping);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 86ac4d1..c711406 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelCollations;
 import org.apache.calcite.rel.RelDistribution;
 import org.apache.calcite.rel.RelDistribution.Type;
@@ -55,8 +56,6 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.AcidUtils.Operation;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
@@ -98,6 +97,7 @@ import org.apache.hadoop.hive.ql.plan.UnionDesc;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
 
 public class HiveOpConverter {
 
@@ -352,13 +352,6 @@ public class HiveOpConverter {
           + " with row type: [" + joinRel.getRowType() + "]");
     }
 
-    JoinPredicateInfo joinPredInfo;
-    if (joinRel instanceof HiveJoin) {
-      joinPredInfo = ((HiveJoin)joinRel).getJoinPredicateInfo();
-    } else {
-      joinPredInfo = ((HiveMultiJoin)joinRel).getJoinPredicateInfo();
-    }
-
     // 4. Extract join key expressions from HiveSortExchange
     ExprNodeDesc[][] joinExpressions = new ExprNodeDesc[inputs.length][];
     for (int i = 0; i < inputs.length; i++) {
@@ -368,23 +361,22 @@ public class HiveOpConverter {
     // 5. Extract rest of join predicate info. We infer the rest of join condition
     //    that will be added to the filters (join conditions that are not part of
     //    the join key)
-    ExprNodeDesc[][] filterExpressions = new ExprNodeDesc[inputs.length][];
-    for (int i = 0; i< inputs.length; i++) {
+    List<RexNode> joinFilters;
+    if (joinRel instanceof HiveJoin) {
+      joinFilters = ImmutableList.of(((HiveJoin)joinRel).getJoinFilter());
+    } else {
+      joinFilters = ((HiveMultiJoin)joinRel).getJoinFilters();
+    }
+    List<List<ExprNodeDesc>> filterExpressions = Lists.newArrayList();
+    for (int i = 0; i< joinFilters.size(); i++) {
       List<ExprNodeDesc> filterExpressionsForInput = new ArrayList<ExprNodeDesc>();
-      Set<String> keySet = new HashSet<String>();
-      for (int j = 0; j < joinPredInfo.getNonEquiJoinPredicateElements().size(); j++) {
-        JoinLeafPredicateInfo joinLeafPredInfo = joinPredInfo.
-            getNonEquiJoinPredicateElements().get(j);
-        for (RexNode joinExprNode : joinLeafPredInfo.getJoinExprs(i)) {
-          if (keySet.add(joinExprNode.toString())) {
-            ExprNodeDesc expr = convertToExprNode(joinExprNode, joinRel,
-                    null, newVcolsInCalcite);
-            filterExpressionsForInput.add(expr);
-          }
+      if (joinFilters.get(i) != null) {
+        for (RexNode conj : RelOptUtil.conjunctions(joinFilters.get(i))) {
+          ExprNodeDesc expr = convertToExprNode(conj, joinRel, null, newVcolsInCalcite);
+          filterExpressionsForInput.add(expr);
         }
       }
-      filterExpressions[i] = filterExpressionsForInput.toArray(
-              new ExprNodeDesc[filterExpressionsForInput.size()]);
+      filterExpressions.add(filterExpressionsForInput);
     }
 
     // 6. Generate Join operator
@@ -822,7 +814,7 @@ public class HiveOpConverter {
   }
 
   private static JoinOperator genJoin(RelNode join, ExprNodeDesc[][] joinExpressions,
-      ExprNodeDesc[][] filterExpressions, List<Operator<?>> children,
+      List<List<ExprNodeDesc>> filterExpressions, List<Operator<?>> children,
       String[] baseSrc, String tabAlias) throws SemanticException {
 
     // 1. Extract join type
@@ -849,6 +841,7 @@ public class HiveOpConverter {
               && joinType != JoinType.RIGHTOUTER;
     }
 
+    // 2. We create the join aux structures
     ArrayList<ColumnInfo> outputColumns = new ArrayList<ColumnInfo>();
     ArrayList<String> outputColumnNames = new ArrayList<String>(join.getRowType()
         .getFieldNames());
@@ -862,7 +855,7 @@ public class HiveOpConverter {
 
     int outputPos = 0;
     for (int pos = 0; pos < children.size(); pos++) {
-      // 2. Backtracking from RS
+      // 2.1. Backtracking from RS
       ReduceSinkOperator inputRS = (ReduceSinkOperator) children.get(pos);
       if (inputRS.getNumParent() != 1) {
         throw new SemanticException("RS should have single parent");
@@ -874,7 +867,7 @@ public class HiveOpConverter {
 
       Byte tag = (byte) rsDesc.getTag();
 
-      // 2.1. If semijoin...
+      // 2.1.1. If semijoin...
       if (semiJoin && pos != 0) {
         exprMap.put(tag, new ArrayList<ExprNodeDesc>());
         childOps[pos] = inputRS;
@@ -902,22 +895,52 @@ public class HiveOpConverter {
       exprMap.put(tag, new ArrayList<ExprNodeDesc>(descriptors.values()));
       colExprMap.putAll(descriptors);
       childOps[pos] = inputRS;
+    }
+
+    // 3. We populate the filters and filterMap structure needed in the join descriptor
+    List<List<ExprNodeDesc>> filtersPerInput = Lists.newArrayList();
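+    // filterMap[i] packs flattened (joinPos, count) pairs: for join input i,
+    // how many filter expressions it holds against each other join input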
+    int[][] filterMap = new int[children.size()][];
+    for (int i = 0; i < children.size(); i++) {
+      filtersPerInput.add(new ArrayList<ExprNodeDesc>());
+    }
+    // 3.1. We populate the filters structure
+    for (int i = 0; i < filterExpressions.size(); i++) {
+      int leftPos = joinCondns[i].getLeft();
+      int rightPos = joinCondns[i].getRight();
 
-      // 3. We populate the filters structure
-      List<ExprNodeDesc> filtersForInput = new ArrayList<ExprNodeDesc>();
-      for (ExprNodeDesc expr : filterExpressions[pos]) {
+      for (ExprNodeDesc expr : filterExpressions.get(i)) {
+        // We need to update the exprNode, as currently
         // they refer to columns in the output of the join;
         // they should refer to the columns output by the RS
-        updateExprNode(expr, colExprMap);
-        filtersForInput.add(expr);
+        int inputPos = updateExprNode(expr, reversedExprs, colExprMap);
+        if (inputPos == -1) {
+          inputPos = leftPos;
+        }
+        filtersPerInput.get(inputPos).add(expr);
+
+        if (joinCondns[i].getType() == JoinDesc.FULL_OUTER_JOIN ||
+                joinCondns[i].getType() == JoinDesc.LEFT_OUTER_JOIN ||
+                joinCondns[i].getType() == JoinDesc.RIGHT_OUTER_JOIN) {
+          if (inputPos == leftPos) {
+            updateFilterMap(filterMap, leftPos, rightPos);
+          } else {
+            updateFilterMap(filterMap, rightPos, leftPos);
+          }
+        }
       }
-      filters.put(tag, filtersForInput);
+    }
+    for (int pos = 0; pos < children.size(); pos++) {
+      ReduceSinkOperator inputRS = (ReduceSinkOperator) children.get(pos);
+      ReduceSinkDesc rsDesc = inputRS.getConf();
+      Byte tag = (byte) rsDesc.getTag();
+      filters.put(tag, filtersPerInput.get(pos));
     }
 
+    // 4. We create the join operator with its descriptor
     JoinDesc desc = new JoinDesc(exprMap, outputColumnNames, noOuterJoin, joinCondns,
             filters, joinExpressions);
     desc.setReversedExprs(reversedExprs);
+    desc.setFilterMap(filterMap);
 
     JoinOperator joinOp = (JoinOperator) OperatorFactory.getAndMakeChild(desc, new RowSchema(
         outputColumns), childOps);
@@ -940,20 +963,49 @@ public class HiveOpConverter {
    * the execution engine expects filters in the Join operators
    * to be expressed that way.
    */
-  private static void updateExprNode(ExprNodeDesc expr, Map<String, ExprNodeDesc> colExprMap) {
+  private static int updateExprNode(ExprNodeDesc expr, final Map<String, Byte> reversedExprs,
+          final Map<String, ExprNodeDesc> colExprMap) {
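+    // Tracks which join input the rewritten column references belong to;
+    // stays -1 when the expression contains no column reference (the caller
+    // then defaults it to the left input of the join condition)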
+    int inputPos = -1;
     if (expr instanceof ExprNodeGenericFuncDesc) {
       ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc) expr;
       List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>();
       for (ExprNodeDesc functionChild : func.getChildren()) {
         if (functionChild instanceof ExprNodeColumnDesc) {
-          newChildren.add(colExprMap.get(functionChild.getExprString()));
+          String colRef = functionChild.getExprString();
+          inputPos = reversedExprs.get(colRef);
+          newChildren.add(colExprMap.get(colRef));
         } else {
-          updateExprNode(functionChild, colExprMap);
+          inputPos = updateExprNode(functionChild, reversedExprs, colExprMap);
           newChildren.add(functionChild);
         }
       }
       func.setChildren(newChildren);
     }
+    return inputPos;
+  }
+
+  private static void updateFilterMap(int[][] filterMap, int inputPos, int joinPos) {
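+    // filterMap[inputPos] is a flat array of (joinPos, count) pairs: bump
+    // the count for an existing joinPos, or append a new pair otherwise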
+    int[] map = filterMap[inputPos];
+    if (map == null) {
+      filterMap[inputPos] = new int[2];
+      filterMap[inputPos][0] = joinPos;
+      filterMap[inputPos][1]++;
+    } else {
+      boolean inserted = false;
+      for (int j = 0; j < map.length / 2 && !inserted; j++) {
+        if (map[j * 2] == joinPos) {
+          map[j * 2 + 1]++;
+          inserted = true;
+        }
+      }
+      if (!inserted) {
+        int[] newMap = new int[map.length + 2];
+        System.arraycopy(map, 0, newMap, 0, map.length);
+        newMap[map.length] = joinPos;
+        newMap[map.length + 1]++;
+        filterMap[inputPos] = newMap;
+      }
+    }
   }
 
   private static JoinType extractJoinType(HiveJoin join) {

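The filterMap bookkeeping above encodes, per join input, a flat int[] of
(joinPos, count) pairs that grows as filters against new join positions
appear. A minimal standalone sketch of that update step follows; the class
and method names are illustrative only, not part of the Hive sources:

import java.util.Arrays;

// Sketch of the flattened (joinPos, count) pair encoding used for the join
// descriptor's filter map; not Hive code, just the data-structure idea.
public class FilterMapSketch {

  // Record one more filter expression that input `inputPos` holds against
  // join input `joinPos`, appending a new pair on first sight.
  static void bump(int[][] filterMap, int inputPos, int joinPos) {
    int[] map = filterMap[inputPos];
    if (map == null) {
      filterMap[inputPos] = new int[] { joinPos, 1 };
      return;
    }
    for (int j = 0; j < map.length / 2; j++) {
      if (map[2 * j] == joinPos) {
        map[2 * j + 1]++;
        return;
      }
    }
    int[] grown = Arrays.copyOf(map, map.length + 2);
    grown[map.length] = joinPos;
    grown[map.length + 1] = 1;
    filterMap[inputPos] = grown;
  }

  public static void main(String[] args) {
    int[][] filterMap = new int[2][];
    bump(filterMap, 0, 1);  // first filter of input 0 against input 1
    bump(filterMap, 0, 1);  // same pair again: the count is bumped in place
    System.out.println(Arrays.toString(filterMap[0]));  // prints [1, 2]
  }
}

Running the sketch prints [1, 2]: join input 0 holds two filter expressions
against join input 1, the shape genJoin hands to JoinDesc.setFilterMap.
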
http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 3b5dbe2..84bb951 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1237,8 +1237,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
             HiveProject.DEFAULT_PROJECT_FACTORY, inputRels, leftJoinKeys, rightJoinKeys, 0,
             leftKeys, rightKeys);
 
-        joinRel = new HiveSemiJoin(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), inputRels[0],
-            inputRels[1], calciteJoinCond, ImmutableIntList.copyOf(leftKeys),
+        joinRel = HiveSemiJoin.getSemiJoin(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+            inputRels[0], inputRels[1], calciteJoinCond, ImmutableIntList.copyOf(leftKeys),
             ImmutableIntList.copyOf(rightKeys));
       } else {
         joinRel = HiveJoin.getJoin(cluster, leftRel, rightRel, calciteJoinCond, calciteJoinType,

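The CalcitePlanner change above swaps direct construction of HiveSemiJoin for
a static factory. A common reason for this shape in Calcite-based planners is
that a RelNode constructor may declare a checked exception that call sites
should not have to handle; the sketch below illustrates that pattern under
this assumption, with every name hypothetical rather than taken from the Hive
sources:

// Self-contained illustration of the factory-method shape: the constructor
// declares a checked exception, the factory converts it to an unchecked one.
class InvalidRelSketchException extends Exception {
  InvalidRelSketchException(String msg) { super(msg); }
}

class SemiJoinSketch {
  SemiJoinSketch(boolean valid) throws InvalidRelSketchException {
    if (!valid) {
      throw new InvalidRelSketchException("cannot build semi-join");
    }
  }

  // Call sites use the factory and never see the checked exception
  static SemiJoinSketch getSemiJoin(boolean valid) {
    try {
      return new SemiJoinSketch(valid);
    } catch (InvalidRelSketchException e) {
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) {
    System.out.println("built: " + getSemiJoin(true));
  }
}

A factory also leaves room to return a different node subtype or apply
simplifications later without touching callers, which is harder once
constructors are invoked directly.
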
http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/test/queries/clientpositive/fouter_join_ppr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/fouter_join_ppr.q b/ql/src/test/queries/clientpositive/fouter_join_ppr.q
new file mode 100644
index 0000000..4bf3705
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/fouter_join_ppr.q
@@ -0,0 +1,73 @@
+set hive.optimize.ppd=true;
+
+-- SORT_QUERY_RESULTS
+
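+-- The query pairs below place the partition predicate (ds) either in the ON
+-- clause or in the WHERE clause of a FULL OUTER JOIN: an ON-clause predicate
+-- only affects row matching and cannot be pushed below a full outer join,
+-- while the null-rejecting WHERE predicates can be pushed below it.
+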
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25;
+
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25;
+
+EXPLAIN EXTENDED
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key AND a.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25;
+
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key AND a.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25;
+
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08';
+
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08';
+
+EXPLAIN EXTENDED
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08';
+
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08';
+


[42/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
index 5488935..2a292fe 100644
--- a/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
+++ b/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
@@ -75,8 +75,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -150,8 +148,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -192,8 +188,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/statsfs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/statsfs.q.out b/ql/src/test/results/clientpositive/statsfs.q.out
index b0bca41..2735f5f 100644
--- a/ql/src/test/results/clientpositive/statsfs.q.out
+++ b/ql/src/test/results/clientpositive/statsfs.q.out
@@ -65,8 +65,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -106,8 +104,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -187,8 +183,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -228,8 +222,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -298,7 +290,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -364,7 +355,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -457,8 +447,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -499,8 +487,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
index 43cc4ef..cefe069 100644
--- a/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
+++ b/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
@@ -85,7 +85,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -137,7 +136,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -242,8 +240,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -293,8 +289,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -352,8 +346,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/ctas.q.out b/ql/src/test/results/clientpositive/tez/ctas.q.out
index 5e9aee2..27b189d 100644
--- a/ql/src/test/results/clientpositive/tez/ctas.q.out
+++ b/ql/src/test/results/clientpositive/tez/ctas.q.out
@@ -146,7 +146,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -294,7 +293,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -442,7 +440,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -507,7 +504,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -656,7 +652,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index 22afcbf..4451046 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -941,8 +941,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part_orc     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -985,8 +983,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_orc     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -1029,8 +1025,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part_limit_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -1073,8 +1067,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_limit_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -1116,8 +1108,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1159,8 +1149,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1202,8 +1190,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1245,8 +1231,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1791,8 +1775,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1835,8 +1817,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1940,8 +1920,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1984,8 +1962,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2256,8 +2232,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2299,8 +2273,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2475,8 +2447,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2518,8 +2488,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index 8a16645..cb001b9 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -854,8 +854,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -898,8 +896,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -942,8 +938,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part_limit   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -986,8 +980,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_limit   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -1029,8 +1021,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1072,8 +1062,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1115,8 +1103,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1158,8 +1144,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1691,8 +1675,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1735,8 +1717,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1840,8 +1820,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1884,8 +1862,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2152,8 +2128,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2195,8 +2169,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2299,8 +2271,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2342,8 +2312,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
index 1f741aa..5bba0cb 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
@@ -197,8 +197,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -259,8 +257,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -428,8 +424,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -490,8 +484,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -680,8 +672,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -742,8 +732,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -910,8 +898,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -972,8 +958,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1219,8 +1203,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1281,8 +1263,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1451,8 +1431,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1513,8 +1491,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
index a61a2e6..6eb9a93 100644
--- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
@@ -98,7 +98,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -147,7 +146,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -196,7 +194,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -286,7 +283,6 @@ state               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -404,8 +400,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -449,8 +443,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -506,8 +498,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -551,8 +541,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -608,8 +596,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -653,8 +639,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -755,8 +739,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -800,8 +782,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -922,8 +902,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -967,8 +945,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1024,8 +1000,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1069,8 +1043,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1126,8 +1098,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1171,8 +1141,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1279,8 +1247,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1324,8 +1290,6 @@ Partition Value:    	[Or]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1491,8 +1455,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1536,8 +1498,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -1595,8 +1555,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1640,8 +1598,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -1699,8 +1655,6 @@ Partition Value:    	[Ca]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1744,8 +1698,6 @@ Partition Value:    	[OH]
 Database:           	default             	 
 Table:              	orc_create_people   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
index 59c8f2a..590db13 100644
--- a/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
@@ -1383,7 +1383,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -3834,7 +3833,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_counter.q.out b/ql/src/test/results/clientpositive/tez/stats_counter.q.out
index e2980e8..8b3dcea 100644
--- a/ql/src/test/results/clientpositive/tez/stats_counter.q.out
+++ b/ql/src/test/results/clientpositive/tez/stats_counter.q.out
@@ -32,7 +32,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -80,7 +79,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
index ab1270c..626dcff 100644
--- a/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
@@ -66,8 +66,6 @@ Partition Value:    	[2008, 11]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -108,8 +106,6 @@ Partition Value:    	[2008, 12]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -190,8 +186,6 @@ Partition Value:    	[10, 11]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -232,8 +226,6 @@ Partition Value:    	[10, 12]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -323,8 +315,6 @@ Partition Value:    	[1997]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -363,8 +353,6 @@ Partition Value:    	[1994]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -403,8 +391,6 @@ Partition Value:    	[1998]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -443,8 +429,6 @@ Partition Value:    	[1996]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/stats_noscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/tez/stats_noscan_1.q.out
index aa49526..96e0e43 100644
--- a/ql/src/test/results/clientpositive/tez/stats_noscan_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/stats_noscan_1.q.out
@@ -100,8 +100,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -142,8 +140,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -184,8 +180,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -226,8 +220,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -266,7 +258,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -381,8 +372,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -423,8 +412,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -465,8 +452,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -507,8 +492,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_only_null.q.out b/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
index 2725be0..eabda89 100644
--- a/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
@@ -229,8 +229,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -272,8 +270,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/tez/tez_fsstat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_fsstat.q.out b/ql/src/test/results/clientpositive/tez/tez_fsstat.q.out
index 3fcf68c..50666a9 100644
--- a/ql/src/test/results/clientpositive/tez/tez_fsstat.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_fsstat.q.out
@@ -82,8 +82,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	tab_part            	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/truncate_column.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/truncate_column.q.out b/ql/src/test/results/clientpositive/truncate_column.q.out
index 2d3e378..2b99d86 100644
--- a/ql/src/test/results/clientpositive/truncate_column.q.out
+++ b/ql/src/test/results/clientpositive/truncate_column.q.out
@@ -36,7 +36,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -100,7 +99,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -176,7 +174,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -242,7 +239,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -318,7 +314,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -383,7 +378,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -450,7 +444,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -534,8 +527,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_tab_part       	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -603,8 +594,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_tab_part       	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/unicode_notation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unicode_notation.q.out b/ql/src/test/results/clientpositive/unicode_notation.q.out
index 5849705..52da674 100644
--- a/ql/src/test/results/clientpositive/unicode_notation.q.out
+++ b/ql/src/test/results/clientpositive/unicode_notation.q.out
@@ -23,7 +23,6 @@ a                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -70,7 +69,6 @@ a                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -117,7 +115,6 @@ a                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_1.q.out b/ql/src/test/results/clientpositive/union_remove_1.q.out
index 18307fc..35e4458 100644
--- a/ql/src/test/results/clientpositive/union_remove_1.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_1.q.out
@@ -188,7 +188,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out
index abc6b8b..2159b7e 100644
--- a/ql/src/test/results/clientpositive/union_remove_10.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_10.q.out
@@ -247,7 +247,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_11.q.out b/ql/src/test/results/clientpositive/union_remove_11.q.out
index 362e093..2ab83dc 100644
--- a/ql/src/test/results/clientpositive/union_remove_11.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_11.q.out
@@ -236,7 +236,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out
index 74cb2a4..82dbcdb 100644
--- a/ql/src/test/results/clientpositive/union_remove_12.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_12.q.out
@@ -227,7 +227,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out
index b12d7cd..5d8433e 100644
--- a/ql/src/test/results/clientpositive/union_remove_13.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_13.q.out
@@ -250,7 +250,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out
index 02abe09..4760f29 100644
--- a/ql/src/test/results/clientpositive/union_remove_14.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_14.q.out
@@ -229,7 +229,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_15.q.out b/ql/src/test/results/clientpositive/union_remove_15.q.out
index 76f8c6c..a259df8 100644
--- a/ql/src/test/results/clientpositive/union_remove_15.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_15.q.out
@@ -212,7 +212,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_16.q.out b/ql/src/test/results/clientpositive/union_remove_16.q.out
index 28c4c2d..c7a08f3 100644
--- a/ql/src/test/results/clientpositive/union_remove_16.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_16.q.out
@@ -244,7 +244,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_17.q.out b/ql/src/test/results/clientpositive/union_remove_17.q.out
index 476ca3a..688e365 100644
--- a/ql/src/test/results/clientpositive/union_remove_17.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_17.q.out
@@ -167,7 +167,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_18.q.out b/ql/src/test/results/clientpositive/union_remove_18.q.out
index 27986ba..96daa12 100644
--- a/ql/src/test/results/clientpositive/union_remove_18.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_18.q.out
@@ -222,7 +222,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out
index 2a1fd55..c1f688e 100644
--- a/ql/src/test/results/clientpositive/union_remove_19.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_19.q.out
@@ -192,7 +192,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_2.q.out b/ql/src/test/results/clientpositive/union_remove_2.q.out
index 78a6a48..29e5d6c 100644
--- a/ql/src/test/results/clientpositive/union_remove_2.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_2.q.out
@@ -199,7 +199,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_20.q.out b/ql/src/test/results/clientpositive/union_remove_20.q.out
index 922ba4c..1da81a7 100644
--- a/ql/src/test/results/clientpositive/union_remove_20.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_20.q.out
@@ -198,7 +198,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out
index 4eac30d..4743d8d 100644
--- a/ql/src/test/results/clientpositive/union_remove_21.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_21.q.out
@@ -182,7 +182,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_22.q.out b/ql/src/test/results/clientpositive/union_remove_22.q.out
index 01cce7a..3f13991 100644
--- a/ql/src/test/results/clientpositive/union_remove_22.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_22.q.out
@@ -202,7 +202,6 @@ values2             	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_23.q.out b/ql/src/test/results/clientpositive/union_remove_23.q.out
index 71ffa1b..cdbe914 100644
--- a/ql/src/test/results/clientpositive/union_remove_23.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_23.q.out
@@ -230,7 +230,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_24.q.out b/ql/src/test/results/clientpositive/union_remove_24.q.out
index 7eaff6f..49086e4 100644
--- a/ql/src/test/results/clientpositive/union_remove_24.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_24.q.out
@@ -194,7 +194,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out
index 78c1f07..37d6a53 100644
--- a/ql/src/test/results/clientpositive/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_25.q.out
@@ -213,8 +213,6 @@ Partition Value:    	[2004]
 Database:           	default             	 
 Table:              	outputtbl1          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	2                   
@@ -419,8 +417,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	outputtbl2          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	2                   
@@ -609,8 +605,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	outputtbl3          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_3.q.out b/ql/src/test/results/clientpositive/union_remove_3.q.out
index f4cbf7b..7045a26 100644
--- a/ql/src/test/results/clientpositive/union_remove_3.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_3.q.out
@@ -188,7 +188,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_4.q.out b/ql/src/test/results/clientpositive/union_remove_4.q.out
index 1946cb6..c545dd4 100644
--- a/ql/src/test/results/clientpositive/union_remove_4.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_4.q.out
@@ -232,7 +232,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out
index b065ba0..1308c09 100644
--- a/ql/src/test/results/clientpositive/union_remove_5.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_5.q.out
@@ -245,7 +245,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_7.q.out b/ql/src/test/results/clientpositive/union_remove_7.q.out
index e00ba3a..61bef8b 100644
--- a/ql/src/test/results/clientpositive/union_remove_7.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_7.q.out
@@ -192,7 +192,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out
index 64cf98b..62af170 100644
--- a/ql/src/test/results/clientpositive/union_remove_8.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_8.q.out
@@ -203,7 +203,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/union_remove_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_9.q.out b/ql/src/test/results/clientpositive/union_remove_9.q.out
index bf7f8de..c0fc54d 100644
--- a/ql/src/test/results/clientpositive/union_remove_9.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_9.q.out
@@ -250,7 +250,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 


[07/50] [abbrv] hive git commit: HIVE-11203: Beeline force option doesn't force execution when errors occur in a script. (Ferdinand, reviewed by Xuefu Zhang)

Posted by xu...@apache.org.
HIVE-11203: Beeline force option doesn't force execution when errors occur in a script. (Ferdinand, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68eab648
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68eab648
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68eab648

Branch: refs/heads/beeline-cli
Commit: 68eab64859de0a484de97a5645e54e809d1a6e68
Parents: f664789
Author: Ferdinand Xu <ch...@intel.com>
Authored: Wed Jul 8 20:58:22 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Wed Jul 8 20:58:22 2015 -0400

----------------------------------------------------------------------
 beeline/src/java/org/apache/hive/beeline/BeeLine.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/68eab648/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 8928c0c..c760c94 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -792,7 +792,7 @@ public class BeeLine implements Closeable {
     FileInputStream initStream = null;
     try {
       initStream = new FileInputStream(fileName);
-      return execute(getConsoleReader(initStream), true);
+      return execute(getConsoleReader(initStream), !getOpts().getForce());
     } catch (Throwable t) {
       handleException(t);
       return ERRNO_OTHER;
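
For context, a self-contained sketch (not the actual BeeLine class) of the behavior this one-line change restores: before the fix, script execution always passed true for exit-on-error, so --force was ignored for script files. The runScript and dispatch names below are hypothetical stand-ins for BeeLine's execute/consoleReader machinery.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class ForceOptionSketch {
    // With force enabled, a failing command is reported but does not stop
    // the remaining commands in the script; without it, the first failure
    // aborts (the pre-HIVE-11203 behavior for scripts).
    static int runScript(String fileName, boolean force) throws IOException {
        try (BufferedReader reader = new BufferedReader(new FileReader(fileName))) {
            String line;
            int lastError = 0;
            while ((line = reader.readLine()) != null) {
                boolean ok = dispatch(line);   // hypothetical command dispatch
                if (!ok) {
                    lastError = 2;             // remember the failure
                    if (!force) {              // equivalent to exitOnError == true
                        return lastError;
                    }
                }
            }
            return lastError;
        }
    }

    static boolean dispatch(String command) {
        return !command.startsWith("bad");     // toy success criterion
    }
}

The committed fix achieves the same effect by passing !getOpts().getForce() as the exit-on-error argument instead of a hard-coded true.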


[04/50] [abbrv] hive git commit: HIVE-11202: Update golden files on master (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by xu...@apache.org.
HIVE-11202: Update golden files on master (Ashutosh Chauhan via Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7df153d3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7df153d3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7df153d3

Branch: refs/heads/beeline-cli
Commit: 7df153d321b17d216ee7dbb5f51c4d7d89941cc3
Parents: 527497c
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Jul 8 08:35:04 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Jul 8 08:36:38 2015 -0700

----------------------------------------------------------------------
 .../join_merge_multi_expressions.q.out          |  46 ++--
 .../spark/join_merge_multi_expressions.q.out    |  46 ++--
 .../clientpositive/spark/louter_join_ppr.q.out  | 260 ++++++-------------
 .../spark/outer_join_ppr.q.java1.7.out          |  88 ++++---
 4 files changed, 187 insertions(+), 253 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7df153d3/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
index a8bd4df..b73643e 100644
--- a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
+++ b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
@@ -21,42 +21,54 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: key (type: string), hr (type: string)
-                sort order: ++
-                Map-reduce partition columns: key (type: string), hr (type: string)
+              Select Operator
+                expressions: key (type: string), hr (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
           TableScan
-            alias: b
+            alias: a
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: key (type: string), hr (type: string)
-                sort order: ++
-                Map-reduce partition columns: key (type: string), hr (type: string)
+              Select Operator
+                expressions: key (type: string), hr (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
           TableScan
-            alias: c
+            alias: a
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: key (type: string), hr (type: string)
-                sort order: ++
-                Map-reduce partition columns: key (type: string), hr (type: string)
+              Select Operator
+                expressions: key (type: string), hr (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
                Inner Join 0 to 2
           keys:
-            0 key (type: string), hr (type: string)
-            1 key (type: string), hr (type: string)
-            2 key (type: string), hr (type: string)
+            0 _col0 (type: string), _col1 (type: string)
+            1 _col0 (type: string), _col1 (type: string)
+            2 _col0 (type: string), _col1 (type: string)
           Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: count()
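
The recurring pattern in these plan diffs is a newly inserted Select Operator that projects each input row down to the join columns and renames them to positional internal names before the shuffle, so the Reduce Output Operator and Join Operator reference _col0/_col1 rather than the source columns key/hr. A conceptual sketch follows (assumed names, not Hive operator code):

public class SelectOperatorSketch {
    record SourceRow(String key, String value, String hr) {}
    record ProjectedRow(String _col0, String _col1) {}  // _col0 = key, _col1 = hr

    static ProjectedRow select(SourceRow r) {
        // expressions: key, hr -> outputColumnNames: _col0, _col1
        return new ProjectedRow(r.key(), r.hr());
    }
}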

http://git-wip-us.apache.org/repos/asf/hive/blob/7df153d3/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out b/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out
index 3b3358f..a18d82e 100644
--- a/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out
@@ -26,37 +26,49 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: key (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: key (type: string), hr (type: string)
+                    Select Operator
+                      expressions: key (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
-                  alias: b
+                  alias: a
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: key (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: key (type: string), hr (type: string)
+                    Select Operator
+                      expressions: key (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 5 
             Map Operator Tree:
                 TableScan
-                  alias: c
+                  alias: a
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: key (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: key (type: string), hr (type: string)
+                    Select Operator
+                      expressions: key (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -64,9 +76,9 @@ STAGE PLANS:
                      Inner Join 0 to 1
                      Inner Join 0 to 2
                 keys:
-                  0 key (type: string), hr (type: string)
-                  1 key (type: string), hr (type: string)
-                  2 key (type: string), hr (type: string)
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col1 (type: string)
+                  2 _col0 (type: string), _col1 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()

http://git-wip-us.apache.org/repos/asf/hive/blob/7df153d3/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
index c3eeb23..44f99c1 100644
--- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
@@ -995,88 +995,25 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: ((key > 10) and (key < 20)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: key (type: string)
-                      sort order: +
-                      Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      tag: 0
-                      value expressions: value (type: string)
-                      auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE true
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [a]
-        Map 3 
-            Map Operator Tree:
-                TableScan
                   alias: b
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((key > 10) and (key < 20)) (type: boolean)
-                    Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: key (type: string)
-                      sort order: +
-                      Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
-                      tag: 1
-                      value expressions: value (type: string), ds (type: string)
-                      auto parallelism: false
+                    predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        tag: 0
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -1172,60 +1109,39 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
+            Truncated Path -> Alias:
+              /srcpart/ds=2008-04-08/hr=11 [b]
+              /srcpart/ds=2008-04-08/hr=12 [b]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) and key is not null) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
+            Path -> Alias:
 #### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+            Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=12
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 12
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -1233,13 +1149,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -1249,66 +1163,62 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [b]
-              /srcpart/ds=2008-04-08/hr=12 [b]
-              /srcpart/ds=2008-04-09/hr=11 [b]
-              /srcpart/ds=2008-04-09/hr=12 [b]
+              /src [a]
         Reducer 2 
             Needs Tagging: true
             Reduce Operator Tree:
               Join Operator
                 condition map:
-                     Left Outer Join0 to 1
+                     Inner Join 0 to 1
                 keys:
-                  0 key (type: string)
-                  1 key (type: string)
-                outputColumnNames: _col0, _col1, _col5, _col6, _col7
-                Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  isSamplingPred: false
-                  predicate: (((_col5 > 15) and (_col5 < 25)) and (_col7 = '2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col3, _col4
+                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
 #### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          properties:
-                            columns _col0,_col1,_col2,_col3
-                            columns.types string:string:string:string
-                            escape.delim \
-                            hive.serialization.extend.additional.nesting.levels true
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
 
   Stage: Stage-0
     Fetch Operator
@@ -1328,8 +1238,6 @@ PREHOOK: Input: default@src
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: FROM 
   src a
@@ -1343,8 +1251,6 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 17	val_17	17	val_17
 17	val_17	17	val_17

http://git-wip-us.apache.org/repos/asf/hive/blob/7df153d3/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
index 588d2ed..cb30993 100644
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
@@ -123,14 +123,18 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
-                  Reduce Output Operator
-                    key expressions: key (type: string)
-                    sort order: +
-                    Map-reduce partition columns: key (type: string)
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    tag: 0
-                    value expressions: value (type: string)
-                    auto parallelism: false
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: _col1 (type: string)
+                      auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -186,14 +190,18 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
-                  Reduce Output Operator
-                    key expressions: key (type: string)
-                    sort order: +
-                    Map-reduce partition columns: key (type: string)
+                  Select Operator
+                    expressions: key (type: string), value (type: string), ds (type: string)
+                    outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    tag: 1
-                    value expressions: value (type: string), ds (type: string)
-                    auto parallelism: false
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: _col1 (type: string), _col2 (type: string)
+                      auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -398,39 +406,35 @@ STAGE PLANS:
                   0 
                   1 {(VALUE._col1 = '2008-04-08')}
                 keys:
-                  0 key (type: string)
-                  1 key (type: string)
-                outputColumnNames: _col0, _col1, _col5, _col6
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   isSamplingPred: false
-                  predicate: ((((_col5 > 15) and (_col5 < 25)) and (_col0 > 10)) and (_col0 < 20)) (type: boolean)
+                  predicate: ((((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) and (UDFToDouble(_col2) > 15.0)) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
                   Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
                     Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          properties:
-                            columns _col0,_col1,_col2,_col3
-                            columns.types string:string:string:string
-                            escape.delim \
-                            hive.serialization.extend.additional.nesting.levels true
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
 
   Stage: Stage-0
     Fetch Operator


[06/50] [abbrv] hive git commit: HIVE-11164 WebHCat should log contents of HiveConf on startup (Eugene Koifman, reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-11164 WebHCat should log contents of HiveConf on startup (Eugene Koifman, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f6647897
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f6647897
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f6647897

Branch: refs/heads/beeline-cli
Commit: f664789737d516ac664462732664121acc111a1e
Parents: f6ea8cb
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Wed Jul 8 17:53:55 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Wed Jul 8 17:53:55 2015 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/templeton/AppConfig.java      | 28 +++++++++++++++-----
 .../hcatalog/templeton/SecureProxySupport.java  |  6 +++--
 2 files changed, 25 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f6647897/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
index 8244274..062d5a0 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
@@ -239,8 +239,24 @@ public class AppConfig extends Configuration {
   private String dumpEnvironent() {
     StringBuilder sb = TempletonUtils.dumpPropMap("========WebHCat System.getenv()========", System.getenv());
     sb.append("START========WebHCat AppConfig.iterator()========: \n");
-    Iterator<Map.Entry<String, String>> configIter = this.iterator();
-    List<Map.Entry<String, String>> configVals = new ArrayList<Map.Entry<String, String>>();
+    dumpConfig(this, sb);
+    sb.append("END========WebHCat AppConfig.iterator()========: \n");
+
+    sb.append(TempletonUtils.dumpPropMap("========WebHCat System.getProperties()========", System.getProperties()));
+
+    sb.append("START========\"new HiveConf()\"========\n");
+    HiveConf c = new HiveConf();
+    sb.append("hiveDefaultUrl=").append(c.getHiveDefaultLocation()).append('\n');
+    sb.append("hiveSiteURL=").append(HiveConf.getHiveSiteLocation()).append('\n');
+    sb.append("hiveServer2SiteUrl=").append(HiveConf.getHiveServer2SiteLocation()).append('\n');
+    sb.append("hivemetastoreSiteUrl=").append(HiveConf.getMetastoreSiteLocation()).append('\n');
+    dumpConfig(c, sb);
+    sb.append("END========\"new HiveConf()\"========\n");
+    return sb.toString();
+  }
+  private static void dumpConfig(Configuration conf, StringBuilder sb) {
+    Iterator<Map.Entry<String, String>> configIter = conf.iterator();
+    List<Map.Entry<String, String>>configVals = new ArrayList<>();
     while(configIter.hasNext()) {
       configVals.add(configIter.next());
     }
@@ -253,20 +269,18 @@ public class AppConfig extends Configuration {
     for(Map.Entry<String, String> entry : configVals) {
       //use get() to make sure variable substitution works
       if(entry.getKey().toLowerCase().contains("path")) {
-        StringTokenizer st = new StringTokenizer(get(entry.getKey()), File.pathSeparator);
+        StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator);
         sb.append(entry.getKey()).append("=\n");
         while(st.hasMoreTokens()) {
           sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
         }
       }
       else {
-        sb.append(entry.getKey()).append('=').append(get(entry.getKey())).append('\n');
+        sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n');
       }
     }
-    sb.append("END========WebHCat AppConfig.iterator()========: \n");
-    sb.append(TempletonUtils.dumpPropMap("========WebHCat System.getProperties()========", System.getProperties()));
-    return sb.toString();
   }
+
   public void startCleanup() {
     JobState.getStorageInstance(this).startCleanup(this);
   }

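The extracted dumpConfig() above takes the Configuration to dump as an explicit parameter, so the same routine can print both the servlet's AppConfig and a freshly constructed HiveConf, reading values through conf.get() so variable substitution still applies. A minimal standalone sketch of that pattern, using only the public Hadoop Configuration API (the class name below is illustrative, not part of the patch):

import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch: dump any Configuration sorted by key, splitting
// PATH-like values onto one line per element for readability.
public class ConfigDumpSketch {
  public static String dump(Configuration conf) {
    List<Map.Entry<String, String>> vals = new ArrayList<>();
    for (Map.Entry<String, String> e : conf) {
      vals.add(e);
    }
    Collections.sort(vals, new Comparator<Map.Entry<String, String>>() {
      public int compare(Map.Entry<String, String> a, Map.Entry<String, String> b) {
        return a.getKey().compareTo(b.getKey());
      }
    });
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> entry : vals) {
      // read through get() so variable substitution is applied
      String value = conf.get(entry.getKey());
      if (entry.getKey().toLowerCase().contains("path")) {
        sb.append(entry.getKey()).append("=\n");
        StringTokenizer st = new StringTokenizer(value, File.pathSeparator);
        while (st.hasMoreTokens()) {
          sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
        }
      } else {
        sb.append(entry.getKey()).append('=').append(value).append('\n');
      }
    }
    return sb.toString();
  }
}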
http://git-wip-us.apache.org/repos/asf/hive/blob/f6647897/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
index b4687b5..1ef5f27 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
@@ -86,6 +86,9 @@ public class SecureProxySupport {
       } catch (Exception e) {
         throw new IOException(e);
       }
+      if(hcatTokenStr == null) {
+        LOG.error("open(" + user + ") token=null");
+      }
       Token<?> msToken = new Token();
       msToken.decodeFromUrlString(hcatTokenStr);
       msToken.setService(new Text(HCAT_SERVICE));
@@ -175,11 +178,10 @@ public class SecureProxySupport {
   }
 
   private String buildHcatDelegationToken(String user)
-    throws IOException, InterruptedException, MetaException, TException {
+    throws IOException, InterruptedException, TException {
     final HiveConf c = new HiveConf();
     final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
     LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
-    final TokenWrapper twrapper = new TokenWrapper();
     final UserGroupInformation ugi = UgiFactory.getUgi(user);
     String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
       public String run()

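The SecureProxySupport change also removes an unused TokenWrapper local and logs when the metastore hands back a null token string, since decodeFromUrlString() would otherwise fail on it. For context, a hedged sketch of the doAs() pattern the surrounding buildHcatDelegationToken() uses, with TokenSource as a hypothetical stand-in for the IMetaStoreClient call:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

// Hedged sketch of the proxy-user pattern: the token request runs inside
// doAs() so it executes with the end user's credentials.
public class ProxyTokenSketch {

  // Hypothetical stand-in for the real IMetaStoreClient delegation-token call.
  public interface TokenSource {
    String getToken() throws Exception;
  }

  public static String fetchAs(UserGroupInformation ugi, final TokenSource source)
      throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<String>() {
      public String run() throws Exception {
        return source.getToken();
      }
    });
  }
}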

[02/50] [abbrv] hive git commit: HIVE-10553: Remove hardcoded Parquet references from SearchArgumentImpl (Owen O'Malley reviewed by Prasanth Jayachandran)

Posted by xu...@apache.org.
HIVE-10553: Remove hardcoded Parquet references from SearchArgumentImpl (Owen O'Malley reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1280ccae
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1280ccae
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1280ccae

Branch: refs/heads/beeline-cli
Commit: 1280ccaeed2912d20a92df4fa609c8aa5b79f35d
Parents: 02e762f
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Jul 7 23:07:15 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue Jul 7 23:07:15 2015 -0700

----------------------------------------------------------------------
 .../read/ParquetRecordReaderWrapper.java        | 102 +++++-
 .../hive/ql/io/sarg/SearchArgumentImpl.java     | 343 ++++---------------
 .../hive/ql/io/sarg/TestSearchArgumentImpl.java |  32 +-
 .../hadoop/hive/ql/io/sarg/ExpressionTree.java  | 157 +++++++++
 .../hadoop/hive/ql/io/sarg/SearchArgument.java  |  14 +-
 5 files changed, 349 insertions(+), 299 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1280ccae/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
index 80a7301..a64ec06 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
@@ -24,7 +24,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.IOConstants;
+import org.apache.hadoop.hive.ql.io.parquet.FilterPredicateLeafBuilder;
+import org.apache.hadoop.hive.ql.io.parquet.LeafFilterFactory;
 import org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher;
+import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree;
+import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
@@ -41,6 +46,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import org.apache.parquet.filter2.compat.FilterCompat;
 import org.apache.parquet.filter2.compat.RowGroupFilter;
+import org.apache.parquet.filter2.predicate.FilterApi;
 import org.apache.parquet.filter2.predicate.FilterPredicate;
 import org.apache.parquet.hadoop.ParquetFileReader;
 import org.apache.parquet.hadoop.ParquetInputFormat;
@@ -142,9 +148,10 @@ public class ParquetRecordReaderWrapper  implements RecordReader<NullWritable, A
       return null;
     }
 
-    FilterPredicate p =
-      SearchArgumentFactory.create(Utilities.deserializeExpression(serializedPushdown))
-        .toFilterPredicate();
+    SearchArgument sarg =
+        SearchArgumentFactory.create(Utilities.deserializeExpression
+            (serializedPushdown));
+    FilterPredicate p = toFilterPredicate(sarg);
     if (p != null) {
       LOG.debug("Predicate filter for parquet is " + p.toString());
       ParquetInputFormat.setFilterPredicate(conf, p);
@@ -303,4 +310,93 @@ public class ParquetRecordReaderWrapper  implements RecordReader<NullWritable, A
   public List<BlockMetaData> getFiltedBlocks() {
     return filtedBlocks;
   }
+
+  /**
+   * Translate the search argument into the filter predicate that Parquet uses.
+   * @return the sarg translated into a Parquet filter predicate
+   */
+  public static FilterPredicate toFilterPredicate(SearchArgument sarg) {
+    return translate(sarg.getExpression(),
+        sarg.getLeaves());
+  }
+
+  private static boolean isMultiLiteralsOperator(PredicateLeaf.Operator op) {
+    return (op == PredicateLeaf.Operator.IN) ||
+        (op == PredicateLeaf.Operator.BETWEEN);
+  }
+
+  private static FilterPredicate translate(ExpressionTree root,
+                                           List<PredicateLeaf> leafs){
+    FilterPredicate p = null;
+    switch (root.getOperator()) {
+      case OR:
+        for(ExpressionTree child: root.getChildren()) {
+          if (p == null) {
+            p = translate(child, leafs);
+          } else {
+            FilterPredicate right = translate(child, leafs);
+            // constant means no filter, ignore it when it is null
+            if(right != null){
+              p = FilterApi.or(p, right);
+            }
+          }
+        }
+        return p;
+      case AND:
+        for(ExpressionTree child: root.getChildren()) {
+          if (p == null) {
+            p = translate(child, leafs);
+          } else {
+            FilterPredicate right = translate(child, leafs);
+            // constant means no filter, ignore it when it is null
+            if(right != null){
+              p = FilterApi.and(p, right);
+            }
+          }
+        }
+        return p;
+      case NOT:
+        FilterPredicate op = translate(root.getChildren().get(0), leafs);
+        if (op != null) {
+          return FilterApi.not(op);
+        } else {
+          return null;
+        }
+      case LEAF:
+        return buildFilterPredicateFromPredicateLeaf(leafs.get(root.getLeaf()));
+      case CONSTANT:
+        return null;// no filter will be executed for constant
+      default:
+        throw new IllegalStateException("Unknown operator: " +
+            root.getOperator());
+    }
+  }
+
+  private static FilterPredicate buildFilterPredicateFromPredicateLeaf
+          (PredicateLeaf leaf) {
+    LeafFilterFactory leafFilterFactory = new LeafFilterFactory();
+    FilterPredicateLeafBuilder builder;
+    try {
+      builder = leafFilterFactory
+          .getLeafFilterBuilderByType(leaf.getType());
+      if (builder == null) {
+        return null;
+      }
+      if (isMultiLiteralsOperator(leaf.getOperator())) {
+        return builder.buildPredicate(leaf.getOperator(),
+            leaf.getLiteralList(),
+            leaf.getColumnName());
+      } else {
+        return builder
+            .buildPredict(leaf.getOperator(),
+                leaf.getLiteral(),
+                leaf.getColumnName());
+      }
+    } catch (Exception e) {
+      LOG.error("fail to build predicate filter leaf with errors" + e, e);
+      return null;
+    }
+  }
+
+
 }

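With the translation moved into the Parquet wrapper, callers build a SearchArgument as before and ask ParquetRecordReaderWrapper for the FilterPredicate, which is exactly what the updated tests below do. A minimal usage sketch, assuming illustrative column names and literals:

import org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.parquet.filter2.predicate.FilterPredicate;

public class SargToParquetSketch {
  public static FilterPredicate example() {
    SearchArgument sarg = SearchArgumentFactory.newBuilder()
        .startAnd()
          .lessThan("id", 10)       // becomes lt(id, 10)
          .isNull("first_name")     // becomes eq(first_name, null)
        .end()
        .build();
    // May return null: CONSTANT nodes translate to "no filter".
    return ParquetRecordReaderWrapper.toFilterPredicate(sarg);
  }
}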
http://git-wip-us.apache.org/repos/asf/hive/blob/1280ccae/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
index 782b5f8..46f1e4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
@@ -22,7 +22,6 @@ import java.math.BigDecimal;
 import java.sql.Timestamp;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Deque;
 import java.util.HashMap;
 import java.util.List;
@@ -35,8 +34,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
-import org.apache.hadoop.hive.ql.io.parquet.FilterPredicateLeafBuilder;
-import org.apache.hadoop.hive.ql.io.parquet.LeafFilterFactory;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -64,9 +61,6 @@ import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.Input;
 import com.esotericsoftware.kryo.io.Output;
 
-import org.apache.parquet.filter2.predicate.FilterApi;
-import org.apache.parquet.filter2.predicate.FilterPredicate;
-
 /**
  * The implementation of SearchArguments.
  */
@@ -188,199 +182,6 @@ final class SearchArgumentImpl implements SearchArgument {
     }
   }
 
-  static class ExpressionTree {
-    static enum Operator {OR, AND, NOT, LEAF, CONSTANT}
-    private final Operator operator;
-    private final List<ExpressionTree> children;
-    private final int leaf;
-    private final TruthValue constant;
-
-    ExpressionTree() {
-      operator = null;
-      children = null;
-      leaf = 0;
-      constant = null;
-    }
-
-    ExpressionTree(Operator op, ExpressionTree... kids) {
-      operator = op;
-      children = new ArrayList<ExpressionTree>();
-      leaf = -1;
-      this.constant = null;
-      Collections.addAll(children, kids);
-    }
-
-    ExpressionTree(int leaf) {
-      operator = Operator.LEAF;
-      children = null;
-      this.leaf = leaf;
-      this.constant = null;
-    }
-
-    ExpressionTree(TruthValue constant) {
-      operator = Operator.CONSTANT;
-      children = null;
-      this.leaf = -1;
-      this.constant = constant;
-    }
-
-    ExpressionTree(ExpressionTree other) {
-      this.operator = other.operator;
-      if (other.children == null) {
-        this.children = null;
-      } else {
-        this.children = new ArrayList<ExpressionTree>();
-        for(ExpressionTree child: other.children) {
-          children.add(new ExpressionTree(child));
-        }
-      }
-      this.leaf = other.leaf;
-      this.constant = other.constant;
-    }
-
-    TruthValue evaluate(TruthValue[] leaves) {
-      TruthValue result = null;
-      switch (operator) {
-        case OR:
-          for(ExpressionTree child: children) {
-            result = child.evaluate(leaves).or(result);
-          }
-          return result;
-        case AND:
-          for(ExpressionTree child: children) {
-            result = child.evaluate(leaves).and(result);
-          }
-          return result;
-        case NOT:
-          return children.get(0).evaluate(leaves).not();
-        case LEAF:
-          return leaves[leaf];
-        case CONSTANT:
-          return constant;
-        default:
-          throw new IllegalStateException("Unknown operator: " + operator);
-      }
-    }
-
-    FilterPredicate translate(List<PredicateLeaf> leafs){
-      FilterPredicate p = null;
-      switch (operator) {
-        case OR:
-          for(ExpressionTree child: children) {
-            if (p == null) {
-              p = child.translate(leafs);
-            } else {
-              FilterPredicate right = child.translate(leafs);
-              // constant means no filter, ignore it when it is null
-              if(right != null){
-                p = FilterApi.or(p, right);
-              }
-            }
-          }
-          return p;
-        case AND:
-          for(ExpressionTree child: children) {
-            if (p == null) {
-              p = child.translate(leafs);
-            } else {
-              FilterPredicate right = child.translate(leafs);
-              // constant means no filter, ignore it when it is null
-              if(right != null){
-                p = FilterApi.and(p, right);
-              }
-            }
-          }
-          return p;
-        case NOT:
-          FilterPredicate op = children.get(0).translate(leafs);
-          if (op != null) {
-            return FilterApi.not(op);
-          } else {
-            return null;
-          }
-        case LEAF:
-          return buildFilterPredicateFromPredicateLeaf(leafs.get(leaf));
-        case CONSTANT:
-          return null;// no filter will be executed for constant
-        default:
-          throw new IllegalStateException("Unknown operator: " + operator);
-      }
-    }
-
-    private FilterPredicate buildFilterPredicateFromPredicateLeaf(PredicateLeaf leaf) {
-      LeafFilterFactory leafFilterFactory = new LeafFilterFactory();
-      FilterPredicateLeafBuilder builder;
-      try {
-        builder = leafFilterFactory
-          .getLeafFilterBuilderByType(leaf.getType());
-        if (builder == null) {
-          return null;
-        }
-        if (isMultiLiteralsOperator(leaf.getOperator())) {
-          return builder.buildPredicate(leaf.getOperator(),
-              leaf.getLiteralList(),
-              leaf.getColumnName());
-        } else {
-          return builder
-            .buildPredict(leaf.getOperator(),
-              leaf.getLiteral(),
-              leaf.getColumnName());
-        }
-      } catch (Exception e) {
-        LOG.error("fail to build predicate filter leaf with errors" + e, e);
-        return null;
-      }
-    }
-
-    private boolean isMultiLiteralsOperator(PredicateLeaf.Operator op) {
-      return (op == PredicateLeaf.Operator.IN) || (op == PredicateLeaf.Operator.BETWEEN);
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder buffer = new StringBuilder();
-      switch (operator) {
-        case OR:
-          buffer.append("(or");
-          for(ExpressionTree child: children) {
-            buffer.append(' ');
-            buffer.append(child.toString());
-          }
-          buffer.append(')');
-          break;
-        case AND:
-          buffer.append("(and");
-          for(ExpressionTree child: children) {
-            buffer.append(' ');
-            buffer.append(child.toString());
-          }
-          buffer.append(')');
-          break;
-        case NOT:
-          buffer.append("(not ");
-          buffer.append(children.get(0));
-          buffer.append(')');
-          break;
-        case LEAF:
-          buffer.append("leaf-");
-          buffer.append(leaf);
-          break;
-        case CONSTANT:
-          buffer.append(constant);
-          break;
-      }
-      return buffer.toString();
-    }
-
-    Operator getOperator() {
-      return operator;
-    }
-
-    List<ExpressionTree> getChildren() {
-      return children;
-    }
-  }
-
   static class ExpressionBuilder {
     // max threshold for CNF conversion. having >8 elements in andList will be converted to maybe
     private static final int CNF_COMBINATIONS_THRESHOLD = 256;
@@ -587,7 +388,7 @@ final class SearchArgumentImpl implements SearchArgument {
 
     private ExpressionTree negate(ExpressionTree expr) {
       ExpressionTree result = new ExpressionTree(ExpressionTree.Operator.NOT);
-      result.children.add(expr);
+      result.getChildren().add(expr);
       return result;
     }
 
@@ -595,7 +396,7 @@ final class SearchArgumentImpl implements SearchArgument {
                              ExprNodeGenericFuncDesc node,
                              List<PredicateLeaf> leafCache) {
       for(ExprNodeDesc child: node.getChildren()) {
-        result.children.add(parse(child, leafCache));
+        result.getChildren().add(parse(child, leafCache));
       }
     }
 
@@ -671,24 +472,24 @@ final class SearchArgumentImpl implements SearchArgument {
      * nodes of the original expression.
      */
     static ExpressionTree pushDownNot(ExpressionTree root) {
-      if (root.operator == ExpressionTree.Operator.NOT) {
-        ExpressionTree child = root.children.get(0);
-        switch (child.operator) {
+      if (root.getOperator() == ExpressionTree.Operator.NOT) {
+        ExpressionTree child = root.getChildren().get(0);
+        switch (child.getOperator()) {
           case NOT:
-            return pushDownNot(child.children.get(0));
+            return pushDownNot(child.getChildren().get(0));
           case CONSTANT:
-            return  new ExpressionTree(child.constant.not());
+            return  new ExpressionTree(child.getConstant().not());
           case AND:
             root = new ExpressionTree(ExpressionTree.Operator.OR);
-            for(ExpressionTree kid: child.children) {
-              root.children.add(pushDownNot(new
+            for(ExpressionTree kid: child.getChildren()) {
+              root.getChildren().add(pushDownNot(new
                   ExpressionTree(ExpressionTree.Operator.NOT, kid)));
             }
             break;
           case OR:
             root = new ExpressionTree(ExpressionTree.Operator.AND);
-            for(ExpressionTree kid: child.children) {
-              root.children.add(pushDownNot(new ExpressionTree
+            for(ExpressionTree kid: child.getChildren()) {
+              root.getChildren().add(pushDownNot(new ExpressionTree
                   (ExpressionTree.Operator.NOT, kid)));
             }
             break;
@@ -696,10 +497,10 @@ final class SearchArgumentImpl implements SearchArgument {
           default:
             break;
         }
-      } else if (root.children != null) {
+      } else if (root.getChildren() != null) {
         // iterate through children and push down not for each one
-        for(int i=0; i < root.children.size(); ++i) {
-          root.children.set(i, pushDownNot(root.children.get(i)));
+        for(int i=0; i < root.getChildren().size(); ++i) {
+          root.getChildren().set(i, pushDownNot(root.getChildren().get(i)));
         }
       }
       return root;
@@ -713,13 +514,13 @@ final class SearchArgumentImpl implements SearchArgument {
      * @return The cleaned up expression
      */
     static ExpressionTree foldMaybe(ExpressionTree expr) {
-      if (expr.children != null) {
-        for(int i=0; i < expr.children.size(); ++i) {
-          ExpressionTree child = foldMaybe(expr.children.get(i));
-          if (child.constant == TruthValue.YES_NO_NULL) {
-            switch (expr.operator) {
+      if (expr.getChildren() != null) {
+        for(int i=0; i < expr.getChildren().size(); ++i) {
+          ExpressionTree child = foldMaybe(expr.getChildren().get(i));
+          if (child.getConstant() == TruthValue.YES_NO_NULL) {
+            switch (expr.getOperator()) {
               case AND:
-                expr.children.remove(i);
+                expr.getChildren().remove(i);
                 i -= 1;
                 break;
               case OR:
@@ -730,10 +531,10 @@ final class SearchArgumentImpl implements SearchArgument {
                   expr);
             }
           } else {
-            expr.children.set(i, child);
+            expr.getChildren().set(i, child);
           }
         }
-        if (expr.children.isEmpty()) {
+        if (expr.getChildren().isEmpty()) {
           return new ExpressionTree(TruthValue.YES_NO_NULL);
         }
       }
@@ -754,15 +555,15 @@ final class SearchArgumentImpl implements SearchArgument {
                                                 List<ExpressionTree> andList,
                                                 List<ExpressionTree> nonAndList
                                                ) {
-      List<ExpressionTree> kids = andList.get(0).children;
+      List<ExpressionTree> kids = andList.get(0).getChildren();
       if (result.isEmpty()) {
         for(ExpressionTree kid: kids) {
           ExpressionTree or = new ExpressionTree(ExpressionTree.Operator.OR);
           result.add(or);
           for(ExpressionTree node: nonAndList) {
-            or.children.add(new ExpressionTree(node));
+            or.getChildren().add(new ExpressionTree(node));
           }
-          or.children.add(kid);
+          or.getChildren().add(kid);
         }
       } else {
         List<ExpressionTree> work = new ArrayList<ExpressionTree>(result);
@@ -770,7 +571,7 @@ final class SearchArgumentImpl implements SearchArgument {
         for(ExpressionTree kid: kids) {
           for(ExpressionTree or: work) {
             ExpressionTree copy = new ExpressionTree(or);
-            copy.children.add(kid);
+            copy.getChildren().add(kid);
             result.add(copy);
           }
         }
@@ -789,23 +590,23 @@ final class SearchArgumentImpl implements SearchArgument {
      * @return the normalized expression
      */
     static ExpressionTree convertToCNF(ExpressionTree root) {
-      if (root.children != null) {
+      if (root.getChildren() != null) {
         // convert all of the children to CNF
-        int size = root.children.size();
+        int size = root.getChildren().size();
         for(int i=0; i < size; ++i) {
-          root.children.set(i, convertToCNF(root.children.get(i)));
+          root.getChildren().set(i, convertToCNF(root.getChildren().get(i)));
         }
-        if (root.operator == ExpressionTree.Operator.OR) {
+        if (root.getOperator() == ExpressionTree.Operator.OR) {
           // a list of leaves that weren't under AND expressions
           List<ExpressionTree> nonAndList = new ArrayList<ExpressionTree>();
           // a list of AND expressions that we need to distribute
           List<ExpressionTree> andList = new ArrayList<ExpressionTree>();
-          for(ExpressionTree child: root.children) {
-            if (child.operator == ExpressionTree.Operator.AND) {
+          for(ExpressionTree child: root.getChildren()) {
+            if (child.getOperator() == ExpressionTree.Operator.AND) {
               andList.add(child);
-            } else if (child.operator == ExpressionTree.Operator.OR) {
+            } else if (child.getOperator() == ExpressionTree.Operator.OR) {
               // pull apart the kids of the OR expression
-              for(ExpressionTree grandkid: child.children) {
+              for(ExpressionTree grandkid: child.getChildren()) {
                 nonAndList.add(grandkid);
               }
             } else {
@@ -815,7 +616,7 @@ final class SearchArgumentImpl implements SearchArgument {
           if (!andList.isEmpty()) {
             if (checkCombinationsThreshold(andList)) {
               root = new ExpressionTree(ExpressionTree.Operator.AND);
-              generateAllCombinations(root.children, andList, nonAndList);
+              generateAllCombinations(root.getChildren(), andList, nonAndList);
             } else {
               root = new ExpressionTree(TruthValue.YES_NO_NULL);
             }
@@ -828,7 +629,7 @@ final class SearchArgumentImpl implements SearchArgument {
     private static boolean checkCombinationsThreshold(List<ExpressionTree> andList) {
       int numComb = 1;
       for (ExpressionTree tree : andList) {
-        numComb *= tree.children.size();
+        numComb *= tree.getChildren().size();
         if (numComb > CNF_COMBINATIONS_THRESHOLD) {
           return false;
         }
@@ -843,33 +644,33 @@ final class SearchArgumentImpl implements SearchArgument {
      *   potentially modified children.
      */
     static ExpressionTree flatten(ExpressionTree root) {
-      if (root.children != null) {
+      if (root.getChildren() != null) {
         // iterate through the index, so that if we add more children,
         // they don't get re-visited
-        for(int i=0; i < root.children.size(); ++i) {
-          ExpressionTree child = flatten(root.children.get(i));
+        for(int i=0; i < root.getChildren().size(); ++i) {
+          ExpressionTree child = flatten(root.getChildren().get(i));
           // do we need to flatten?
-          if (child.operator == root.operator &&
-              child.operator != ExpressionTree.Operator.NOT) {
+          if (child.getOperator() == root.getOperator() &&
+              child.getOperator() != ExpressionTree.Operator.NOT) {
             boolean first = true;
-            for(ExpressionTree grandkid: child.children) {
+            for(ExpressionTree grandkid: child.getChildren()) {
               // for the first grandkid replace the original parent
               if (first) {
                 first = false;
-                root.children.set(i, grandkid);
+                root.getChildren().set(i, grandkid);
               } else {
-                root.children.add(++i, grandkid);
+                root.getChildren().add(++i, grandkid);
               }
             }
           } else {
-            root.children.set(i, child);
+            root.getChildren().set(i, child);
           }
         }
         // if we have a singleton AND or OR, just return the child
-        if ((root.operator == ExpressionTree.Operator.OR ||
-             root.operator == ExpressionTree.Operator.AND) &&
-            root.children.size() == 1) {
-          return root.children.get(0);
+        if ((root.getOperator() == ExpressionTree.Operator.OR ||
+             root.getOperator() == ExpressionTree.Operator.AND) &&
+            root.getChildren().size() == 1) {
+          return root.getChildren().get(0);
         }
       }
       return root;
@@ -888,13 +689,13 @@ final class SearchArgumentImpl implements SearchArgument {
                                          List<PredicateLeaf> leafCache,
                                          Map<PredicateLeaf,
                                              ExpressionTree> lookup) {
-      if (expr.children != null) {
-        for(int i=0; i < expr.children.size(); ++i) {
-          expr.children.set(i, buildLeafList(expr.children.get(i), leafCache,
-              lookup));
+      if (expr.getChildren() != null) {
+        for(int i=0; i < expr.getChildren().size(); ++i) {
+          expr.getChildren().set(i, buildLeafList(expr.getChildren().get(i),
+              leafCache, lookup));
         }
-      } else if (expr.operator == ExpressionTree.Operator.LEAF) {
-        PredicateLeaf leaf = leafCache.get(expr.leaf);
+      } else if (expr.getOperator() == ExpressionTree.Operator.LEAF) {
+        PredicateLeaf leaf = leafCache.get(expr.getLeaf());
         ExpressionTree val = lookup.get(leaf);
         if (val == null) {
           val = new ExpressionTree(leaves.size());
@@ -975,7 +776,8 @@ final class SearchArgumentImpl implements SearchArgument {
     return expression == null ? TruthValue.YES : expression.evaluate(leaves);
   }
 
-  ExpressionTree getExpression() {
+  @Override
+  public ExpressionTree getExpression() {
     return expression;
   }
 
@@ -1006,11 +808,6 @@ final class SearchArgumentImpl implements SearchArgument {
     return new Kryo().readObject(input, SearchArgumentImpl.class);
   }
 
-  @Override
-  public FilterPredicate toFilterPredicate() {
-    return expression.translate(leaves);
-  }
-
   private static class BuilderImpl implements Builder {
     private final Deque<ExpressionTree> currentTree =
         new ArrayDeque<ExpressionTree>();
@@ -1022,7 +819,7 @@ final class SearchArgumentImpl implements SearchArgument {
       ExpressionTree node = new ExpressionTree(ExpressionTree.Operator.OR);
       if (currentTree.size() != 0) {
         ExpressionTree parent = currentTree.getFirst();
-        parent.children.add(node);
+        parent.getChildren().add(node);
       }
       currentTree.addFirst(node);
       return this;
@@ -1033,7 +830,7 @@ final class SearchArgumentImpl implements SearchArgument {
       ExpressionTree node = new ExpressionTree(ExpressionTree.Operator.AND);
       if (currentTree.size() != 0) {
         ExpressionTree parent = currentTree.getFirst();
-        parent.children.add(node);
+        parent.getChildren().add(node);
       }
       currentTree.addFirst(node);
       return this;
@@ -1044,7 +841,7 @@ final class SearchArgumentImpl implements SearchArgument {
       ExpressionTree node = new ExpressionTree(ExpressionTree.Operator.NOT);
       if (currentTree.size() != 0) {
         ExpressionTree parent = currentTree.getFirst();
-        parent.children.add(node);
+        parent.getChildren().add(node);
       }
       currentTree.addFirst(node);
       return this;
@@ -1053,12 +850,12 @@ final class SearchArgumentImpl implements SearchArgument {
     @Override
     public Builder end() {
       root = currentTree.removeFirst();
-      if (root.children.size() == 0) {
+      if (root.getChildren().size() == 0) {
         throw new IllegalArgumentException("Can't create expression " + root +
             " with no children.");
       }
-      if (root.operator == ExpressionTree.Operator.NOT &&
-          root.children.size() != 1) {
+      if (root.getOperator() == ExpressionTree.Operator.NOT &&
+          root.getChildren().size() != 1) {
         throw new IllegalArgumentException("Can't create not expression " +
             root + " with more than 1 child.");
       }
@@ -1127,7 +924,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.LESS_THAN,
               getType(box), column, box, null);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 
@@ -1139,7 +936,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.LESS_THAN_EQUALS,
               getType(box), column, box, null);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 
@@ -1151,7 +948,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.EQUALS,
               getType(box), column, box, null);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 
@@ -1163,7 +960,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
               getType(box), column, box, null);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 
@@ -1183,7 +980,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.IN,
               getType(argList.get(0)), column, null, argList);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 
@@ -1194,7 +991,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.IS_NULL,
               PredicateLeaf.Type.STRING, column, null, null);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 
@@ -1208,7 +1005,7 @@ final class SearchArgumentImpl implements SearchArgument {
           new PredicateLeafImpl(PredicateLeaf.Operator.BETWEEN,
               getType(argList.get(0)), column, null, argList);
       leaves.add(leaf);
-      parent.children.add(new ExpressionTree(leaves.size() - 1));
+      parent.getChildren().add(new ExpressionTree(leaves.size() - 1));
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1280ccae/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java
index 9e6adef..46ce49c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java
@@ -27,9 +27,9 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl.ExpressionBuilder;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl.ExpressionTree;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl.PredicateLeafImpl;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
@@ -784,7 +784,7 @@ public class TestSearchArgumentImpl {
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(9, leaves.size());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String[] conditions = new String[]{
       "eq(first_name, Binary{\"john\"})",    /* first_name = 'john' */
       "not(lteq(first_name, Binary{\"greg\"}))", /* 'greg' < first_name */
@@ -1076,7 +1076,7 @@ public class TestSearchArgumentImpl {
       "lteq(id, 4)"                         /* id <= 4             */
     };
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = String.format("or(or(or(%1$s, %2$s), %3$s), %4$s)", conditions);
     assertEquals(expected, p.toString());
 
@@ -1506,7 +1506,7 @@ public class TestSearchArgumentImpl {
       "eq(last_name, Binary{\"smith\"})"    /* 'smith' = last_name  */
     };
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = String.format("and(and(and(%1$s, %2$s), %3$s), %4$s)", conditions);
     assertEquals(expected, p.toString());
 
@@ -1727,7 +1727,7 @@ public class TestSearchArgumentImpl {
       "or(eq(id, 34), eq(id, 50))" /* id in (34,50) */
     };
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = String.format("and(and(%1$s, %2$s), %3$s)", conditions);
     assertEquals(expected, p.toString());
 
@@ -1986,7 +1986,7 @@ public class TestSearchArgumentImpl {
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(1, leaves.size());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected =
       "and(lt(first_name, Binary{\"greg\"}), not(lteq(first_name, Binary{\"david\"})))";
     assertEquals(p.toString(), expected);
@@ -2466,7 +2466,7 @@ public class TestSearchArgumentImpl {
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(9, leaves.size());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = "and(and(and(and(and(and(and(and(and(and(and(and(and(and(and(and(and(" +
       "or(or(or(lt(id, 18), lt(id, 10)), lt(id, 13)), lt(id, 16)), " +
       "or(or(or(lt(id, 18), lt(id, 11)), lt(id, 13)), lt(id, 16))), " +
@@ -2622,7 +2622,7 @@ public class TestSearchArgumentImpl {
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(0, leaves.size());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     assertNull(p);
 
     assertEquals("YES_NO_NULL",
@@ -2877,7 +2877,7 @@ public class TestSearchArgumentImpl {
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(1, leaves.size());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = "and(not(lt(id, 10)), not(lt(id, 10)))";
     assertEquals(expected, p.toString());
 
@@ -2934,7 +2934,7 @@ public class TestSearchArgumentImpl {
         "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" +
         "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected =
       "and(and(and(not(eq(x, null)), not(and(lt(y, 20), not(lteq(y, 10))))), not(or(or(eq(z, 1), " +
         "eq(z, 2)), eq(z, 3)))), not(eq(a, Binary{\"stinger\"})))";
@@ -2955,7 +2955,8 @@ public class TestSearchArgumentImpl {
         "leaf-1 = (LESS_THAN_EQUALS y hi)\n" +
         "leaf-2 = (EQUALS z 1)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
-    assertEquals("lteq(y, Binary{\"hi\"})", sarg.toFilterPredicate().toString());
+    assertEquals("lteq(y, Binary{\"hi\"})",
+        ParquetRecordReaderWrapper.toFilterPredicate(sarg).toString());
 
     sarg = SearchArgumentFactory.newBuilder()
         .startNot()
@@ -2973,7 +2974,7 @@ public class TestSearchArgumentImpl {
         "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" +
         "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = "and(and(not(eq(x, null)), not(or(or(eq(z, 1), eq(z, 2)), eq(z, 3)))), " +
         "not(eq(a, Binary{\"stinger\"})))";
     assertEquals(expected, p.toString());
@@ -2993,7 +2994,8 @@ public class TestSearchArgumentImpl {
         "leaf-1 = (LESS_THAN_EQUALS y hi)\n" +
         "leaf-2 = (EQUALS z 1.0)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString());
-    assertEquals("lteq(y, Binary{\"hi\"})", sarg.toFilterPredicate().toString());
+    assertEquals("lteq(y, Binary{\"hi\"})",
+        ParquetRecordReaderWrapper.toFilterPredicate(sarg).toString());
 
     sarg = SearchArgumentFactory.newBuilder()
         .startNot()
@@ -3011,7 +3013,7 @@ public class TestSearchArgumentImpl {
         "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" +
         "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = "and(and(not(eq(x, null)), not(or(or(eq(z, 1), eq(z, 2)), eq(z, 3)))), " +
         "not(eq(a, Binary{\"stinger\"})))";
     assertEquals(expected, p.toString());
@@ -3036,7 +3038,7 @@ public class TestSearchArgumentImpl {
         "leaf-4 = (EQUALS z1 0.22)\n" +
         "expr = (and leaf-0 leaf-1 leaf-2 leaf-3 leaf-4)", sarg.toString());
 
-    FilterPredicate p = sarg.toFilterPredicate();
+    FilterPredicate p = ParquetRecordReaderWrapper.toFilterPredicate(sarg);
     String expected = "and(and(and(and(lt(x, 22), lt(x1, 22)), lteq(y, Binary{\"hi\"})), eq(z, " +
         "0.22)), eq(z1, 0.22))";
     assertEquals(expected, p.toString());

http://git-wip-us.apache.org/repos/asf/hive/blob/1280ccae/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java b/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java
new file mode 100644
index 0000000..2dd3a45
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/ExpressionTree.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io.sarg;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * The inner representation of the SearchArgument. Most users should not
+ * need this interface; it is only for file formats that need to translate
+ * the SearchArgument into an internal form.
+ */
+public class ExpressionTree {
+  public enum Operator {OR, AND, NOT, LEAF, CONSTANT}
+  private final Operator operator;
+  private final List<ExpressionTree> children;
+  private final int leaf;
+  private final SearchArgument.TruthValue constant;
+
+  ExpressionTree() {
+    operator = null;
+    children = null;
+    leaf = 0;
+    constant = null;
+  }
+
+  ExpressionTree(Operator op, ExpressionTree... kids) {
+    operator = op;
+    children = new ArrayList<ExpressionTree>();
+    leaf = -1;
+    this.constant = null;
+    Collections.addAll(children, kids);
+  }
+
+  ExpressionTree(int leaf) {
+    operator = Operator.LEAF;
+    children = null;
+    this.leaf = leaf;
+    this.constant = null;
+  }
+
+  ExpressionTree(SearchArgument.TruthValue constant) {
+    operator = Operator.CONSTANT;
+    children = null;
+    this.leaf = -1;
+    this.constant = constant;
+  }
+
+  ExpressionTree(ExpressionTree other) {
+    this.operator = other.operator;
+    if (other.children == null) {
+      this.children = null;
+    } else {
+      this.children = new ArrayList<ExpressionTree>();
+      for(ExpressionTree child: other.children) {
+        children.add(new ExpressionTree(child));
+      }
+    }
+    this.leaf = other.leaf;
+    this.constant = other.constant;
+  }
+
+  public SearchArgument.TruthValue evaluate(SearchArgument.TruthValue[] leaves
+                                            ) {
+    SearchArgument.TruthValue result = null;
+    switch (operator) {
+      case OR:
+        for(ExpressionTree child: children) {
+          result = child.evaluate(leaves).or(result);
+        }
+        return result;
+      case AND:
+        for(ExpressionTree child: children) {
+          result = child.evaluate(leaves).and(result);
+        }
+        return result;
+      case NOT:
+        return children.get(0).evaluate(leaves).not();
+      case LEAF:
+        return leaves[leaf];
+      case CONSTANT:
+        return constant;
+      default:
+        throw new IllegalStateException("Unknown operator: " + operator);
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder buffer = new StringBuilder();
+    switch (operator) {
+      case OR:
+        buffer.append("(or");
+        for(ExpressionTree child: children) {
+          buffer.append(' ');
+          buffer.append(child.toString());
+        }
+        buffer.append(')');
+        break;
+      case AND:
+        buffer.append("(and");
+        for(ExpressionTree child: children) {
+          buffer.append(' ');
+          buffer.append(child.toString());
+        }
+        buffer.append(')');
+        break;
+      case NOT:
+        buffer.append("(not ");
+        buffer.append(children.get(0));
+        buffer.append(')');
+        break;
+      case LEAF:
+        buffer.append("leaf-");
+        buffer.append(leaf);
+        break;
+      case CONSTANT:
+        buffer.append(constant);
+        break;
+    }
+    return buffer.toString();
+  }
+
+  public Operator getOperator() {
+    return operator;
+  }
+
+  public List<ExpressionTree> getChildren() {
+    return children;
+  }
+
+  public SearchArgument.TruthValue getConstant() {
+    return constant;
+  }
+
+  public int getLeaf() {
+    return leaf;
+  }
+}
+
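
For orientation, here is a minimal sketch of how a reader might act on this tree once it has computed a TruthValue for each leaf from its column statistics. The helper class and the skip policy are illustrative only, not part of this patch; the tree itself would come from the getExpression() accessor added to SearchArgument below.

import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;

/** Hypothetical reader-side helper; not part of this patch. */
public class RowGroupFilter {
  /**
   * Decide whether a row group can be skipped, given one TruthValue per
   * leaf (index-aligned with SearchArgument.getLeaves()) computed from
   * that row group's statistics.
   */
  public static boolean canSkip(ExpressionTree expr, TruthValue[] leafValues) {
    // evaluate() folds the per-leaf answers through the OR/AND/NOT/LEAF/
    // CONSTANT cases shown above.
    TruthValue overall = expr.evaluate(leafValues);
    // Conservative policy: skip only on a definite NO, i.e. when no row
    // in the group can possibly satisfy the predicate.
    return overall == TruthValue.NO;
  }
}

Skipping only on a definite NO is conservative: mixed answers such as YES_NO still force a read, so no matching row is ever lost.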

http://git-wip-us.apache.org/repos/asf/hive/blob/1280ccae/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java b/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java
index df208d4..84604cb 100644
--- a/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java
+++ b/serde/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.io.sarg;
 
-import org.apache.parquet.filter2.predicate.FilterPredicate;
-
 import java.util.List;
 
 /**
@@ -159,6 +157,12 @@ public interface SearchArgument {
   public List<PredicateLeaf> getLeaves();
 
   /**
+   * Get the expression tree. This should only be needed for file formats that
+   * need to translate the expression to an internal form.
+   */
+  public ExpressionTree getExpression();
+ 
+  /**
    * Evaluate the entire predicate based on the values for the leaf predicates.
    * @param leaves the value of each leaf predicate
   * @return the value of the entire predicate
@@ -177,12 +181,6 @@ public interface SearchArgument {
   public String toKryo();
 
   /**
-   * Translate the search argument to the filter predicate parquet used
-   * @return
-   */
-  public FilterPredicate toFilterPredicate();
-
-  /**
    * A builder object for contexts outside of Hive where it isn't easy to
    * get a ExprNodeDesc. The user must call startOr, startAnd, or startNot
    * before adding any leaves.


[19/50] [abbrv] hive git commit: HIVE-11190: No prompting info or warning provided when METASTORE_FILTER_HOOK in authorization V2 is overridden(Dapeng Sun, reviewed by Thejas M Nair and Ferdinand Xu)

Posted by xu...@apache.org.
HIVE-11190: No prompting info or warning provided when METASTORE_FILTER_HOOK in authorization V2 is overridden(Dapeng Sun, reviewed by Thejas M Nair and Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ad1cb15a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ad1cb15a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ad1cb15a

Branch: refs/heads/beeline-cli
Commit: ad1cb15a8e35ebc1631996ffda7b4302276483bc
Parents: e6ea691
Author: Ferdinand Xu <ch...@intel.com>
Authored: Sun Jul 12 21:08:58 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Sun Jul 12 21:09:31 2015 -0400

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/session/SessionState.java   | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ad1cb15a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 0bc9a46..49d64db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.AuthorizationMetaStoreFilterHook;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
@@ -764,8 +765,15 @@ public class SessionState {
     if (conf.get(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, "").equals(Boolean.TRUE.toString())) {
       return;
     }
+    String metastoreHook = conf.get(ConfVars.METASTORE_FILTER_HOOK.name());
+    if (!ConfVars.METASTORE_FILTER_HOOK.getDefaultValue().equals(metastoreHook) &&
+        !AuthorizationMetaStoreFilterHook.class.getName().equals(metastoreHook)) {
+      LOG.warn(ConfVars.METASTORE_FILTER_HOOK.name() +
+          " will be ignored, since hive.security.authorization.manager" +
+          " is set to instance of HiveAuthorizerFactory.");
+    }
     conf.setVar(ConfVars.METASTORE_FILTER_HOOK,
-        "org.apache.hadoop.hive.ql.security.authorization.plugin.AuthorizationMetaStoreFilterHook");
+        AuthorizationMetaStoreFilterHook.class.getName());
 
     authorizerV2.applyAuthorizationConfigPolicy(conf);
     // update config in Hive thread local as well and init the metastore client
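
The effect is that authorization V2 still installs its own hook, but a user-supplied value no longer vanishes silently. A self-contained sketch of the precedence rule follows; the default hook class name is an assumption here, whereas the patch reads both values from HiveConf as shown above.

/** Standalone illustration of the precedence check; not Hive code. */
public class FilterHookPrecedence {
  // Assumed default for hive.metastore.filter.hook; the patch uses
  // ConfVars.METASTORE_FILTER_HOOK.getDefaultValue() instead.
  static final String DEFAULT_HOOK =
      "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl";
  static final String AUTHZ_HOOK = "org.apache.hadoop.hive.ql.security."
      + "authorization.plugin.AuthorizationMetaStoreFilterHook";

  public static void main(String[] args) {
    String configured = args.length > 0 ? args[0] : DEFAULT_HOOK;
    // Warn only when the user set a third value: the default and the
    // authorization hook itself are both expected and stay silent.
    if (!DEFAULT_HOOK.equals(configured) && !AUTHZ_HOOK.equals(configured)) {
      System.err.println("WARN: " + configured + " will be ignored; "
          + "authorization V2 installs " + AUTHZ_HOOK);
    }
    System.out.println("effective hook: " + AUTHZ_HOOK);
  }
}

Running it with a custom class name prints the warning; running it with either expected value stays quiet, matching the guard in the diff.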


[47/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 9e1ac80..2dabce2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -18,14 +18,6 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -34,6 +26,14 @@ import org.apache.hadoop.hive.ql.parse.ParseUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 /**
  * AlterTableDesc.
  *
@@ -51,8 +51,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
     ADDPROPS("add props"), DROPPROPS("drop props"), ADDSERDE("add serde"), ADDSERDEPROPS("add serde props"),
     ADDFILEFORMAT("add fileformat"), ADDCLUSTERSORTCOLUMN("add cluster sort column"),
     RENAMECOLUMN("rename column"), ADDPARTITION("add partition"), TOUCH("touch"), ARCHIVE("archieve"),
-    UNARCHIVE("unarchieve"), ALTERPROTECTMODE("alter protect mode"),
-    ALTERPARTITIONPROTECTMODE("alter partition protect mode"), ALTERLOCATION("alter location"),
+    UNARCHIVE("unarchieve"), ALTERLOCATION("alter location"),
     DROPPARTITION("drop partition"), RENAMEPARTITION("rename partition"), ADDSKEWEDBY("add skew column"),
     ALTERSKEWEDLOCATION("alter skew location"), ALTERBUCKETNUM("alter bucket number"),
     ALTERPARTITION("alter partition"), COMPACT("compact"),
@@ -72,7 +71,6 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
       new HashSet<AlterTableDesc.AlterTableTypes>();
 
   static {
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE);
     alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDCOLS);
     alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.REPLACECOLS);
     alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.RENAMECOLUMN);

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
index bb0e7f7..62c8f7e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
@@ -18,12 +18,13 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
  * DropTableDesc.
@@ -55,7 +56,6 @@ public class DropTableDesc extends DDLDesc implements Serializable {
   boolean expectView;
   boolean ifExists;
   boolean ifPurge;
-  boolean ignoreProtection;
   ReplicationSpec replicationSpec;
 
   public DropTableDesc() {
@@ -73,13 +73,11 @@ public class DropTableDesc extends DDLDesc implements Serializable {
     this.expectView = expectView;
     this.ifExists = ifExists;
     this.ifPurge = ifPurge;
-    this.ignoreProtection = false;
     this.replicationSpec = replicationSpec;
   }
 
   public DropTableDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs,
-      boolean expectView, boolean ignoreProtection, boolean ifPurge,
-      ReplicationSpec replicationSpec) {
+      boolean expectView, boolean ifPurge, ReplicationSpec replicationSpec) {
     this.tableName = tableName;
     this.partSpecs = new ArrayList<PartSpec>(partSpecs.size());
     for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) {
@@ -88,7 +86,6 @@ public class DropTableDesc extends DDLDesc implements Serializable {
         this.partSpecs.add(new PartSpec(expr, prefixLength));
       }
     }
-    this.ignoreProtection = ignoreProtection;
     this.expectView = expectView;
     this.ifPurge = ifPurge;
     this.replicationSpec = replicationSpec;
@@ -118,21 +115,6 @@ public class DropTableDesc extends DDLDesc implements Serializable {
   }
 
   /**
-   * @return whether or not protection will be ignored for the partition
-   */
-  public boolean getIgnoreProtection() {
-    return ignoreProtection;
-  }
-
-  /**
-   * @param ignoreProtection
-   *          set whether or not protection will be ignored for the partition
-   */
-   public void setIgnoreProtection(boolean ignoreProtection) {
-     this.ignoreProtection = ignoreProtection;
-   }
-
-  /**
    * @return whether to expect a view being dropped
    */
   public boolean getExpectView() {
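
For callers, the constructor change is mechanical: the ignoreProtection argument disappears and every other parameter keeps its position. A hypothetical call site under that reading (names are illustrative):

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

/** Hypothetical call site showing the post-patch constructor shape. */
public class DropPartitionExample {
  static DropTableDesc build(String tableName,
      Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs,
      ReplicationSpec replicationSpec) {
    // Before this patch a boolean ignoreProtection sat between
    // expectView and ifPurge; it is gone along with protect mode.
    return new DropTableDesc(tableName, partSpecs,
        false /* expectView */, false /* ifPurge */, replicationSpec);
  }
}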

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 75cdf16..df37832 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -93,8 +93,6 @@ public enum HiveOperation {
   SHOW_ROLES("SHOW_ROLES", null, null),
   SHOW_ROLE_PRINCIPALS("SHOW_ROLE_PRINCIPALS", null, null),
   SHOW_ROLE_GRANT("SHOW_ROLE_GRANT", null, null),
-  ALTERTABLE_PROTECTMODE("ALTERTABLE_PROTECTMODE", new Privilege[]{Privilege.ALTER_METADATA}, null),
-  ALTERPARTITION_PROTECTMODE("ALTERPARTITION_PROTECTMODE", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_FILEFORMAT("ALTERTABLE_FILEFORMAT", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERPARTITION_FILEFORMAT("ALTERPARTITION_FILEFORMAT", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_LOCATION("ALTERTABLE_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null),

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q b/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
deleted file mode 100644
index 8cbb25c..0000000
--- a/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
+++ /dev/null
@@ -1,8 +0,0 @@
--- Create table
-create table if not exists alter_part_invalidspec(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-
--- Load data
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='10');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='12');
-
-alter table alter_part_invalidspec partition (year='1997') enable no_drop;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_partition_nodrop.q b/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
deleted file mode 100644
index 3c0ff02..0000000
--- a/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
+++ /dev/null
@@ -1,9 +0,0 @@
--- Create table
-create table if not exists alter_part_nodrop_part(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-
--- Load data
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='10');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='12');
-
-alter table alter_part_nodrop_part partition (year='1996') enable no_drop;
-alter table alter_part_nodrop_part drop partition (year='1996');

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q b/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
deleted file mode 100644
index f2135b1..0000000
--- a/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
+++ /dev/null
@@ -1,9 +0,0 @@
--- Create table
-create table if not exists alter_part_nodrop_table(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-
--- Load data
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='10');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='12');
-
-alter table alter_part_nodrop_table partition (year='1996') enable no_drop;
-drop table alter_part_nodrop_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/alter_partition_offline.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_partition_offline.q b/ql/src/test/queries/clientnegative/alter_partition_offline.q
deleted file mode 100644
index 7376d8b..0000000
--- a/ql/src/test/queries/clientnegative/alter_partition_offline.q
+++ /dev/null
@@ -1,11 +0,0 @@
--- create table
-create table if not exists alter_part_offline (key string, value string ) partitioned by (year string, month string) stored as textfile ;
-
--- Load data
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='10');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='12');
-
-alter table alter_part_offline partition (year='1996') disable offline;
-select * from alter_part_offline where year = '1996';
-alter table alter_part_offline partition (year='1996') enable offline;
-select * from alter_part_offline where year = '1996';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/drop_table_failure3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_table_failure3.q b/ql/src/test/queries/clientnegative/drop_table_failure3.q
deleted file mode 100644
index 534ce0b..0000000
--- a/ql/src/test/queries/clientnegative/drop_table_failure3.q
+++ /dev/null
@@ -1,12 +0,0 @@
-create database dtf3;
-use dtf3; 
-
-create table drop_table_failure_temp(col STRING) partitioned by (p STRING);
-
-alter table drop_table_failure_temp add partition (p ='p1');
-alter table drop_table_failure_temp add partition (p ='p2');
-alter table drop_table_failure_temp add partition (p ='p3');
-
-alter table drop_table_failure_temp partition (p ='p3') ENABLE NO_DROP;
-
-drop table drop_table_failure_temp;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_part.q b/ql/src/test/queries/clientnegative/protectmode_part.q
deleted file mode 100644
index 5415999..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_part.q
+++ /dev/null
@@ -1,15 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode3;
-
-create table tbl_protectmode3  (col string) partitioned by (p string);
-alter table tbl_protectmode3 add partition (p='p1');
-alter table tbl_protectmode3 add partition (p='p2');
-
-select * from tbl_protectmode3 where p='p1';
-select * from tbl_protectmode3 where p='p2';
-
-alter table tbl_protectmode3 partition (p='p1') enable offline;
-
-select * from tbl_protectmode3 where p='p2';
-select * from tbl_protectmode3 where p='p1';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_part1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_part1.q b/ql/src/test/queries/clientnegative/protectmode_part1.q
deleted file mode 100644
index 99256da..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_part1.q
+++ /dev/null
@@ -1,21 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode5;
-
-create table tbl_protectmode5_1 (col string);
-
-create table tbl_protectmode5  (col string) partitioned by (p string);
-alter table tbl_protectmode5 add partition (p='p1');
-alter table tbl_protectmode5 add partition (p='p2');
-
-insert overwrite table tbl_protectmode5_1
-select col from tbl_protectmode5 where p='p1';
-insert overwrite table tbl_protectmode5_1
-select col from tbl_protectmode5 where p='p2';
-
-alter table tbl_protectmode5 partition (p='p1') enable offline;
-
-insert overwrite table tbl_protectmode5_1
-select col from tbl_protectmode5 where p='p2';
-insert overwrite table tbl_protectmode5_1
-select col from tbl_protectmode5 where p='p1';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_part2.q b/ql/src/test/queries/clientnegative/protectmode_part2.q
deleted file mode 100644
index 3fdc036..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_part2.q
+++ /dev/null
@@ -1,9 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode6;
-
-create table tbl_protectmode6  (c1 string,c2 string) partitioned by (p string);
-alter table tbl_protectmode6 add partition (p='p1');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE tbl_protectmode6 partition (p='p1');
-alter table tbl_protectmode6 partition (p='p1') enable offline; 
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE tbl_protectmode6 partition (p='p1');

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_part_no_drop.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_part_no_drop.q b/ql/src/test/queries/clientnegative/protectmode_part_no_drop.q
deleted file mode 100644
index b4e508f..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_part_no_drop.q
+++ /dev/null
@@ -1,10 +0,0 @@
--- protect mode: syntax to change protect mode works and queries to drop partitions are blocked if it is marked no drop
-
-drop table tbl_protectmode_no_drop;
-
-create table tbl_protectmode_no_drop  (c1 string,c2 string) partitioned by (p string);
-alter table tbl_protectmode_no_drop add partition (p='p1');
-alter table tbl_protectmode_no_drop partition (p='p1') enable no_drop;
-desc extended tbl_protectmode_no_drop partition (p='p1');
-
-alter table tbl_protectmode_no_drop drop partition (p='p1');

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_part_no_drop2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_part_no_drop2.q b/ql/src/test/queries/clientnegative/protectmode_part_no_drop2.q
deleted file mode 100644
index e7e8c42..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_part_no_drop2.q
+++ /dev/null
@@ -1,11 +0,0 @@
--- protect mode: syntax to change protect mode works and queries to drop partitions are blocked if it is marked no drop
-
-create database if not exists db1;
-use db1;
-
-create table tbl_protectmode_no_drop2  (c1 string,c2 string) partitioned by (p string);
-alter table tbl_protectmode_no_drop2 add partition (p='p1');
-alter table tbl_protectmode_no_drop2 partition (p='p1') enable no_drop;
-
-use default;
-drop table db1.tbl_protectmode_no_drop2;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl1.q b/ql/src/test/queries/clientnegative/protectmode_tbl1.q
deleted file mode 100644
index 2361299..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl1.q
+++ /dev/null
@@ -1,8 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_1;
-
-create table tbl_protectmode_1  (col string);
-select * from tbl_protectmode_1;
-alter table tbl_protectmode_1 enable offline;
-select * from tbl_protectmode_1;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl2.q b/ql/src/test/queries/clientnegative/protectmode_tbl2.q
deleted file mode 100644
index 05964c3..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl2.q
+++ /dev/null
@@ -1,12 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode2;
-
-create table tbl_protectmode2  (col string) partitioned by (p string);
-alter table tbl_protectmode2 add partition (p='p1');
-alter table tbl_protectmode2 enable no_drop;
-alter table tbl_protectmode2 enable offline;
-alter table tbl_protectmode2 disable no_drop;
-desc extended tbl_protectmode2;
-
-select * from tbl_protectmode2 where p='p1';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl3.q b/ql/src/test/queries/clientnegative/protectmode_tbl3.q
deleted file mode 100644
index bbaa267..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl3.q
+++ /dev/null
@@ -1,10 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_4;
-
-create table tbl_protectmode_4  (col string);
-select col from tbl_protectmode_4;
-alter table tbl_protectmode_4 enable offline;
-desc extended tbl_protectmode_4;
-
-select col from tbl_protectmode_4;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl4.q b/ql/src/test/queries/clientnegative/protectmode_tbl4.q
deleted file mode 100644
index c7880de..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl4.q
+++ /dev/null
@@ -1,15 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_tbl4;
-drop table tbl_protectmode_tbl4_src;
-
-create table tbl_protectmode_tbl4_src (col string);
-
-create table tbl_protectmode_tbl4  (col string) partitioned by (p string);
-alter table tbl_protectmode_tbl4 add partition (p='p1');
-alter table tbl_protectmode_tbl4 enable no_drop;
-alter table tbl_protectmode_tbl4 enable offline;
-alter table tbl_protectmode_tbl4 disable no_drop;
-desc extended tbl_protectmode_tbl4;
-
-select col from tbl_protectmode_tbl4 where p='not_exist';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl5.q b/ql/src/test/queries/clientnegative/protectmode_tbl5.q
deleted file mode 100644
index cd848fd..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl5.q
+++ /dev/null
@@ -1,15 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_tbl5;
-drop table tbl_protectmode_tbl5_src;
-
-create table tbl_protectmode_tbl5_src (col string);
-
-create table tbl_protectmode_tbl5  (col string) partitioned by (p string);
-alter table tbl_protectmode_tbl5 add partition (p='p1');
-alter table tbl_protectmode_tbl5 enable no_drop;
-alter table tbl_protectmode_tbl5 enable offline;
-alter table tbl_protectmode_tbl5 disable no_drop;
-desc extended tbl_protectmode_tbl5;
-
-insert overwrite table tbl_protectmode_tbl5 partition (p='not_exist') select col from tbl_protectmode_tbl5_src;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl6.q b/ql/src/test/queries/clientnegative/protectmode_tbl6.q
deleted file mode 100644
index 26248cc..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl6.q
+++ /dev/null
@@ -1,8 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_tbl6;
-
-create table tbl_protectmode_tbl6 (col string);
-alter table tbl_protectmode_tbl6 enable no_drop cascade;
-
-drop table tbl_protectmode_tbl6;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl7.q b/ql/src/test/queries/clientnegative/protectmode_tbl7.q
deleted file mode 100644
index afff840..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl7.q
+++ /dev/null
@@ -1,13 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_tbl7;
-create table tbl_protectmode_tbl7  (col string) partitioned by (p string);
-alter table tbl_protectmode_tbl7 add partition (p='p1');
-alter table tbl_protectmode_tbl7 enable no_drop;
-
-alter table tbl_protectmode_tbl7 drop partition (p='p1');
-
-alter table tbl_protectmode_tbl7 add partition (p='p1');
-alter table tbl_protectmode_tbl7 enable no_drop cascade;
-
-alter table tbl_protectmode_tbl7 drop partition (p='p1');

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl8.q b/ql/src/test/queries/clientnegative/protectmode_tbl8.q
deleted file mode 100644
index 809c287..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl8.q
+++ /dev/null
@@ -1,13 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode_tbl8;
-create table tbl_protectmode_tbl8  (col string) partitioned by (p string);
-alter table tbl_protectmode_tbl8 add partition (p='p1');
-alter table tbl_protectmode_tbl8 enable no_drop;
-
-alter table tbl_protectmode_tbl8 drop partition (p='p1');
-
-alter table tbl_protectmode_tbl8 enable no_drop cascade;
-
-alter table tbl_protectmode_tbl8 add partition (p='p1');
-alter table tbl_protectmode_tbl8 drop partition (p='p1');

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/protectmode_tbl_no_drop.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/protectmode_tbl_no_drop.q b/ql/src/test/queries/clientnegative/protectmode_tbl_no_drop.q
deleted file mode 100644
index a4ef2ac..0000000
--- a/ql/src/test/queries/clientnegative/protectmode_tbl_no_drop.q
+++ /dev/null
@@ -1,9 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl_protectmode__no_drop;
-
-create table tbl_protectmode__no_drop  (col string);
-select * from tbl_protectmode__no_drop;
-alter table tbl_protectmode__no_drop enable no_drop;
-desc extended tbl_protectmode__no_drop;
-drop table tbl_protectmode__no_drop;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientnegative/sa_fail_hook3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/sa_fail_hook3.q b/ql/src/test/queries/clientnegative/sa_fail_hook3.q
deleted file mode 100644
index e54201c..0000000
--- a/ql/src/test/queries/clientnegative/sa_fail_hook3.q
+++ /dev/null
@@ -1,4 +0,0 @@
-create table mp2 (a string) partitioned by (b string);
-alter table mp2 add partition (b='1');
-alter table mp2 partition (b='1') enable NO_DROP;
-alter table mp2 drop partition (b='1');

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q b/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
deleted file mode 100644
index 7a1f3dd..0000000
--- a/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
+++ /dev/null
@@ -1,26 +0,0 @@
--- Create table
-create table if not exists alter_part_protect_mode(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-
--- Load data
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='10');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='12');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1995', month='09');
-load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1994', month='07');
-
--- offline
-alter table alter_part_protect_mode partition (year='1996') disable offline;
-select * from alter_part_protect_mode where year = '1996';
-alter table alter_part_protect_mode partition (year='1995') enable offline;
-alter table alter_part_protect_mode partition (year='1995') disable offline;
-select * from alter_part_protect_mode where year = '1995';
-
--- no_drop
-alter table alter_part_protect_mode partition (year='1996') enable no_drop;
-alter table alter_part_protect_mode partition (year='1995') disable no_drop;
-alter table alter_part_protect_mode drop partition (year='1995');
-alter table alter_part_protect_mode partition (year='1994', month='07') disable no_drop;
-alter table alter_part_protect_mode drop partition (year='1994');
-
--- Cleanup
-alter table alter_part_protect_mode partition (year='1996') disable no_drop;
-drop table alter_part_protect_mode;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientpositive/drop_partitions_ignore_protection.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_partitions_ignore_protection.q b/ql/src/test/queries/clientpositive/drop_partitions_ignore_protection.q
deleted file mode 100644
index e825df9..0000000
--- a/ql/src/test/queries/clientpositive/drop_partitions_ignore_protection.q
+++ /dev/null
@@ -1,10 +0,0 @@
-create table tbl_protectmode_no_drop  (c1 string,c2 string) partitioned by (p string);
-alter table tbl_protectmode_no_drop add partition (p='p1');
-alter table tbl_protectmode_no_drop partition (p='p1') enable no_drop;
-desc extended tbl_protectmode_no_drop partition (p='p1');
-
--- The partition will be dropped, even though we have enabled no_drop
--- as 'ignore protection' has been specified in the command predicate
-alter table tbl_protectmode_no_drop drop partition (p='p1') ignore protection;
-drop table tbl_protectmode_no_drop;
-

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientpositive/protectmode.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/protectmode.q b/ql/src/test/queries/clientpositive/protectmode.q
deleted file mode 100644
index 27055fb..0000000
--- a/ql/src/test/queries/clientpositive/protectmode.q
+++ /dev/null
@@ -1,63 +0,0 @@
--- protect mode: syntax to change protect mode works and queries are not blocked if a table or partition is not in protect mode
-
-drop table tbl1;
-drop table tbl2;
-
-create table tbl1  (col string);
-select * from tbl1;
-select col from tbl1;
-alter table tbl1 enable offline;
-desc extended tbl1;
-alter table tbl1 disable offline;
-desc extended tbl1;
-select * from tbl1;
-select col from tbl1;
- 
-create table tbl2  (col string) partitioned by (p string);
-alter table tbl2 add partition (p='p1');
-alter table tbl2 add partition (p='p2');
-alter table tbl2 add partition (p='p3');
-alter table tbl2 drop partition (p='not_exist');
-
-select * from tbl2 where p='p1';
-select * from tbl2 where p='p2';
-
-alter table tbl2 partition (p='p1') enable offline;
-desc extended tbl2 partition (p='p1');
-
-alter table tbl2 enable offline;
-desc extended tbl2;
-
-alter table tbl2 enable no_drop;
-desc extended tbl2;
-alter table tbl2 drop partition (p='p3');
-
-alter table tbl2 disable offline;
-desc extended tbl2;
-
-alter table tbl2 disable no_drop;
-desc extended tbl2;
-
-select * from tbl2 where p='p2';
-select col from tbl2 where p='p2';
-
-alter table tbl2 partition (p='p1') disable offline;
-desc extended tbl2 partition (p='p1');
-
-select * from tbl2 where p='p1';
-select col from tbl2 where p='p1';
-
-insert overwrite table tbl1 select col from tbl2 where p='p1';
-insert overwrite table tbl1 select col from tbl1;
-
-alter table tbl2 partition (p='p1') enable no_drop;
-alter table tbl2 partition (p='p1') disable no_drop;
-
-alter table tbl2 partition (p='p2') enable no_drop;
-
-alter table tbl2 drop partition (p='p1');
-
-alter table tbl2 partition (p='p2') disable no_drop;
-
-drop table tbl1;
-drop table tbl2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/queries/clientpositive/protectmode2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/protectmode2.q b/ql/src/test/queries/clientpositive/protectmode2.q
deleted file mode 100644
index 978b032..0000000
--- a/ql/src/test/queries/clientpositive/protectmode2.q
+++ /dev/null
@@ -1,23 +0,0 @@
-drop table tbl1;
-
-create table tbl1 (col string);
-alter table tbl1 enable no_drop cascade;
-desc extended tbl1;
-alter table tbl1 enable no_drop;
-desc extended tbl1;
-alter table tbl1 disable no_drop cascade;
-desc extended tbl1;
-alter table tbl1 disable no_drop;
-
-drop table tbl1;
-
-drop table tbl2;
-create table tbl2 (col string) partitioned by (p string);
-alter table tbl2 add partition (p='p1');
-alter table tbl2 add partition (p='p2');
-alter table tbl2 add partition (p='p3');
-alter table tbl2 enable no_drop cascade;
-desc formatted tbl2;
-alter table tbl2 disable no_drop cascade;
-desc formatted tbl2;
-drop table tbl2;

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
index b5b089a..b0ccce5 100644
--- a/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
+++ b/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
@@ -24,7 +24,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -69,7 +68,6 @@ No rows selected
 'Table:              ','tst1                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1/ds=1',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -117,7 +115,6 @@ No rows selected
 'Table:              ','tst1                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1/ds=1',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -155,7 +152,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -203,7 +199,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -251,7 +246,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -299,7 +293,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -347,7 +340,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/create_like.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_like.q.out b/ql/src/test/results/beelinepositive/create_like.q.out
index 40b6cb7..df1ccc3 100644
--- a/ql/src/test/results/beelinepositive/create_like.q.out
+++ b/ql/src/test/results/beelinepositive/create_like.q.out
@@ -18,7 +18,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like.db/table1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -51,7 +50,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like.db/table2',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -90,7 +88,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like.db/table3',''
 'Table Type:         ','EXTERNAL_TABLE      ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/create_like2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_like2.q.out b/ql/src/test/results/beelinepositive/create_like2.q.out
index 8bdb44f..ca6c69a 100644
--- a/ql/src/test/results/beelinepositive/create_like2.q.out
+++ b/ql/src/test/results/beelinepositive/create_like2.q.out
@@ -24,7 +24,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like2.db/table2',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/create_like_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_like_view.q.out b/ql/src/test/results/beelinepositive/create_like_view.q.out
index 80483c3..4d5ede1 100644
--- a/ql/src/test/results/beelinepositive/create_like_view.q.out
+++ b/ql/src/test/results/beelinepositive/create_like_view.q.out
@@ -28,7 +28,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -70,7 +69,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table2',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -114,7 +112,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table3',''
 'Table Type:         ','EXTERNAL_TABLE      ',''
@@ -182,7 +179,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table1',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_skewed_table1.q.out b/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
index 60b54a7..c887e28 100644
--- a/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
+++ b/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
@@ -20,7 +20,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_skewed_table1.db/list_bucket_single_2',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -52,7 +51,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_skewed_table1.db/list_bucket_single',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -85,7 +83,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_skewed_table1.db/list_bucket_multiple',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/create_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_view.q.out b/ql/src/test/results/beelinepositive/create_view.q.out
index 0ec69d1..2ae4e08 100644
--- a/ql/src/test/results/beelinepositive/create_view.q.out
+++ b/ql/src/test/results/beelinepositive/create_view.q.out
@@ -170,7 +170,6 @@ SELECT * from view2 where key=18;
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -213,7 +212,6 @@ SELECT * from view2 where key=18;
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -253,7 +251,6 @@ SELECT * from view2 where key=18;
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -293,7 +290,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -503,7 +499,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -560,7 +555,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -614,7 +608,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -664,7 +657,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -716,7 +708,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -770,7 +761,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -827,7 +817,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -920,7 +909,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -1015,7 +1003,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -1079,7 +1066,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_view_partitioned.q.out b/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
index 1f0717e..9460960 100644
--- a/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
+++ b/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
@@ -44,7 +44,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -186,7 +185,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''
@@ -258,7 +256,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Table Type:         ','VIRTUAL_VIEW        ',''
 'Table Parameters:','',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/ctas.q.out b/ql/src/test/results/beelinepositive/ctas.q.out
index 5ba3374..15e3355 100644
--- a/ql/src/test/results/beelinepositive/ctas.q.out
+++ b/ql/src/test/results/beelinepositive/ctas.q.out
@@ -144,7 +144,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -299,7 +298,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas2',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -455,7 +453,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -520,7 +517,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -676,7 +672,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas4',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out b/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
index 420f8df..f393f58 100644
--- a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
+++ b/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
@@ -33,7 +33,6 @@ No rows affected
 'Table:              ','view_partitioned    ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','null                ',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/describe_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_table.q.out b/ql/src/test/results/beelinepositive/describe_table.q.out
index 71d1a54..1ad5134 100644
--- a/ql/src/test/results/beelinepositive/describe_table.q.out
+++ b/ql/src/test/results/beelinepositive/describe_table.q.out
@@ -60,7 +60,6 @@ Saving all output to "!!{outputDirectory}!!/describe_table.q.raw". Enter "record
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -108,7 +107,6 @@ Saving all output to "!!{outputDirectory}!!/describe_table.q.raw". Enter "record
 'Table:              ','srcpart             ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -157,7 +155,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart_serdeprops',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/merge3.q.out b/ql/src/test/results/beelinepositive/merge3.q.out
index 5d85293..7e7d8cb 100644
--- a/ql/src/test/results/beelinepositive/merge3.q.out
+++ b/ql/src/test/results/beelinepositive/merge3.q.out
@@ -2283,7 +2283,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/merge3.db/merge_src2',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/part_inherit_tbl_props.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/part_inherit_tbl_props.q.out b/ql/src/test/results/beelinepositive/part_inherit_tbl_props.q.out
index b436880..0b7ba0e 100644
--- a/ql/src/test/results/beelinepositive/part_inherit_tbl_props.q.out
+++ b/ql/src/test/results/beelinepositive/part_inherit_tbl_props.q.out
@@ -25,7 +25,6 @@ No rows affected
 'Table:              ','mytbl               ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/part_inherit_tbl_props.db/mytbl/c2=v1',''
 'Partition Parameters:','',''
 '','a                   ','myval               '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/part_inherit_tbl_props_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/part_inherit_tbl_props_empty.q.out b/ql/src/test/results/beelinepositive/part_inherit_tbl_props_empty.q.out
index 2010d13..fa45c0c 100644
--- a/ql/src/test/results/beelinepositive/part_inherit_tbl_props_empty.q.out
+++ b/ql/src/test/results/beelinepositive/part_inherit_tbl_props_empty.q.out
@@ -23,7 +23,6 @@ No rows affected
 'Table:              ','mytbl               ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/part_inherit_tbl_props_empty.db/mytbl/c2=v1',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/part_inherit_tbl_props_with_star.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/part_inherit_tbl_props_with_star.q.out b/ql/src/test/results/beelinepositive/part_inherit_tbl_props_with_star.q.out
index 27c58e0..b6d964e 100644
--- a/ql/src/test/results/beelinepositive/part_inherit_tbl_props_with_star.q.out
+++ b/ql/src/test/results/beelinepositive/part_inherit_tbl_props_with_star.q.out
@@ -25,7 +25,6 @@ No rows affected
 'Table:              ','mytbl               ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/part_inherit_tbl_props_with_star.db/mytbl/c2=v1',''
 'Partition Parameters:','',''
 '','a                   ','myval               '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/protectmode2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/protectmode2.q.out b/ql/src/test/results/beelinepositive/protectmode2.q.out
index 9995420..acaa26b 100644
--- a/ql/src/test/results/beelinepositive/protectmode2.q.out
+++ b/ql/src/test/results/beelinepositive/protectmode2.q.out
@@ -63,7 +63,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','NO_DROP_CASCADE     ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/protectmode2.db/tbl2',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -102,7 +101,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/protectmode2.db/tbl2',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats1.q.out b/ql/src/test/results/beelinepositive/stats1.q.out
index a6d10df..91a9f5c 100644
--- a/ql/src/test/results/beelinepositive/stats1.q.out
+++ b/ql/src/test/results/beelinepositive/stats1.q.out
@@ -185,7 +185,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats1.db/tmptable',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -226,7 +225,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats1.db/tmptable',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats10.q.out b/ql/src/test/results/beelinepositive/stats10.q.out
index 803a897..a7b38e7 100644
--- a/ql/src/test/results/beelinepositive/stats10.q.out
+++ b/ql/src/test/results/beelinepositive/stats10.q.out
@@ -395,7 +395,6 @@ No rows selected
 'Table:              ','bucket3_1           ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats10.db/bucket3_1/ds=1',''
 'Partition Parameters:','',''
 '','numFiles            ','2                   '
@@ -433,7 +432,6 @@ No rows selected
 'Table:              ','bucket3_1           ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats10.db/bucket3_1/ds=2',''
 'Partition Parameters:','',''
 '','numFiles            ','2                   '
@@ -470,7 +468,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats10.db/bucket3_1',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats11.q.out b/ql/src/test/results/beelinepositive/stats11.q.out
index e86aaf9..51bd85b 100644
--- a/ql/src/test/results/beelinepositive/stats11.q.out
+++ b/ql/src/test/results/beelinepositive/stats11.q.out
@@ -69,7 +69,6 @@ No rows affected
 'Table:              ','srcbucket_mapjoin_part',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats11.db/srcbucket_mapjoin_part/ds=2008-04-08',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -109,7 +108,6 @@ No rows affected
 'Table:              ','srcbucket_mapjoin_part',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats11.db/srcbucket_mapjoin_part/ds=2008-04-08',''
 'Partition Parameters:','',''
 '','numFiles            ','2                   '
@@ -149,7 +147,6 @@ No rows affected
 'Table:              ','srcbucket_mapjoin_part',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats11.db/srcbucket_mapjoin_part/ds=2008-04-08',''
 'Partition Parameters:','',''
 '','numFiles            ','3                   '
@@ -189,7 +186,6 @@ No rows affected
 'Table:              ','srcbucket_mapjoin_part',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats11.db/srcbucket_mapjoin_part/ds=2008-04-08',''
 'Partition Parameters:','',''
 '','numFiles            ','4                   '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats12.q.out b/ql/src/test/results/beelinepositive/stats12.q.out
index f6e1634..1774243 100644
--- a/ql/src/test/results/beelinepositive/stats12.q.out
+++ b/ql/src/test/results/beelinepositive/stats12.q.out
@@ -151,7 +151,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats12.db/analyze_srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -193,7 +192,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats12.db/analyze_srcpart/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -232,7 +230,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats12.db/analyze_srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -271,7 +268,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats12.db/analyze_srcpart/ds=2008-04-09/hr=11',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '
@@ -306,7 +302,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats12.db/analyze_srcpart/ds=2008-04-09/hr=12',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats13.q.out b/ql/src/test/results/beelinepositive/stats13.q.out
index 9f2142e..17f3ef5 100644
--- a/ql/src/test/results/beelinepositive/stats13.q.out
+++ b/ql/src/test/results/beelinepositive/stats13.q.out
@@ -109,7 +109,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats13.db/analyze_srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -151,7 +150,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats13.db/analyze_srcpart/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -190,7 +188,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats13.db/analyze_srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '
@@ -225,7 +222,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats13.db/analyze_srcpart/ds=2008-04-09/hr=11',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '
@@ -260,7 +256,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats13.db/analyze_srcpart/ds=2008-04-09/hr=12',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '
@@ -298,7 +293,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats13.db/analyze_srcpart2',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats14.q.out b/ql/src/test/results/beelinepositive/stats14.q.out
index e9df282..2d10772 100644
--- a/ql/src/test/results/beelinepositive/stats14.q.out
+++ b/ql/src/test/results/beelinepositive/stats14.q.out
@@ -23,7 +23,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats14.db/stats_src',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -86,7 +85,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats14.db/stats_part',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -128,7 +126,6 @@ No rows selected
 'Table:              ','stats_part          ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats14.db/stats_part/ds=2010-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -167,7 +164,6 @@ No rows selected
 'Table:              ','stats_part          ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats14.db/stats_part/ds=2010-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -209,7 +205,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats14.db/stats_part',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats15.q.out b/ql/src/test/results/beelinepositive/stats15.q.out
index 9be8852..ceaf500 100644
--- a/ql/src/test/results/beelinepositive/stats15.q.out
+++ b/ql/src/test/results/beelinepositive/stats15.q.out
@@ -25,7 +25,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats15.db/stats_src',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -88,7 +87,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats15.db/stats_part',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -130,7 +128,6 @@ No rows selected
 'Table:              ','stats_part          ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats15.db/stats_part/ds=2010-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -169,7 +166,6 @@ No rows selected
 'Table:              ','stats_part          ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats15.db/stats_part/ds=2010-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -211,7 +207,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats15.db/stats_part',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats16.q.out b/ql/src/test/results/beelinepositive/stats16.q.out
index ea8593e..333b8d3 100644
--- a/ql/src/test/results/beelinepositive/stats16.q.out
+++ b/ql/src/test/results/beelinepositive/stats16.q.out
@@ -20,7 +20,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats16.db/stats16',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -57,7 +56,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats16.db/stats16',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats18.q.out b/ql/src/test/results/beelinepositive/stats18.q.out
index e504089..61867d4 100644
--- a/ql/src/test/results/beelinepositive/stats18.q.out
+++ b/ql/src/test/results/beelinepositive/stats18.q.out
@@ -40,7 +40,6 @@ No rows selected
 'Table:              ','stats_part          ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats18.db/stats_part/ds=2010-04-08/hr=13',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -83,7 +82,6 @@ No rows affected
 'Table:              ','stats_part          ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats18.db/stats_part/ds=2010-04-08/hr=13',''
 'Partition Parameters:','',''
 '','numFiles            ','2                   '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats2.q.out b/ql/src/test/results/beelinepositive/stats2.q.out
index c958b06..82b338d 100644
--- a/ql/src/test/results/beelinepositive/stats2.q.out
+++ b/ql/src/test/results/beelinepositive/stats2.q.out
@@ -90,7 +90,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats2.db/analyze_t1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -154,7 +153,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats2.db/analyze_t1',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats3.q.out b/ql/src/test/results/beelinepositive/stats3.q.out
index f0680b4..d57f6b9 100644
--- a/ql/src/test/results/beelinepositive/stats3.q.out
+++ b/ql/src/test/results/beelinepositive/stats3.q.out
@@ -72,7 +72,6 @@ No rows affected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats3.db/hive_test_src',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -161,7 +160,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats3.db/hive_test_dst',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats4.q.out b/ql/src/test/results/beelinepositive/stats4.q.out
index 421b9a7..d5be03d 100644
--- a/ql/src/test/results/beelinepositive/stats4.q.out
+++ b/ql/src/test/results/beelinepositive/stats4.q.out
@@ -2266,7 +2266,6 @@ No rows selected
 'Table:              ','nzhang_part1        ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats4.db/nzhang_part1/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -2305,7 +2304,6 @@ No rows selected
 'Table:              ','nzhang_part1        ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats4.db/nzhang_part1/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -2344,7 +2342,6 @@ No rows selected
 'Table:              ','nzhang_part2        ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats4.db/nzhang_part2/ds=2008-12-31/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -2383,7 +2380,6 @@ No rows selected
 'Table:              ','nzhang_part2        ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats4.db/nzhang_part2/ds=2008-12-31/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -2422,7 +2418,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats4.db/nzhang_part1',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -2463,7 +2458,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats4.db/nzhang_part2',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats5.q.out b/ql/src/test/results/beelinepositive/stats5.q.out
index 4339503..40c8c5b 100644
--- a/ql/src/test/results/beelinepositive/stats5.q.out
+++ b/ql/src/test/results/beelinepositive/stats5.q.out
@@ -48,7 +48,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats5.db/analyze_src',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats6.q.out b/ql/src/test/results/beelinepositive/stats6.q.out
index 3d2bdcb..98a19c6 100644
--- a/ql/src/test/results/beelinepositive/stats6.q.out
+++ b/ql/src/test/results/beelinepositive/stats6.q.out
@@ -41,7 +41,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats6.db/analyze_srcpart/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -80,7 +79,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats6.db/analyze_srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -119,7 +117,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats6.db/analyze_srcpart/ds=2008-04-09/hr=11',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '
@@ -154,7 +151,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats6.db/analyze_srcpart/ds=2008-04-09/hr=12',''
 'Partition Parameters:','',''
 '','transient_lastDdlTime','!!UNIXTIME!!          '
@@ -189,7 +185,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats6.db/analyze_srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''


[15/50] [abbrv] hive git commit: HIVE-11170 : port parts of HIVE-11015 to master for ease of future merging (Sergey Shelukhin, reviewed by Vikram Dixit K)

Posted by xu...@apache.org.
HIVE-11170 : port parts of HIVE-11015 to master for ease of future merging (Sergey Shelukhin, reviewed by Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d89a7d1e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d89a7d1e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d89a7d1e

Branch: refs/heads/beeline-cli
Commit: d89a7d1e7fe7fb51aeb514e4357ae149158b2a34
Parents: d314425
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Jul 9 17:50:32 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Jul 9 17:50:32 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/FilterOperator.java     |   3 +-
 .../hive/ql/exec/mr/ExecMapperContext.java      |  10 +-
 .../ql/io/HiveContextAwareRecordReader.java     |   2 +-
 .../org/apache/hadoop/hive/ql/io/IOContext.java |  43 ------
 .../apache/hadoop/hive/ql/io/IOContextMap.java  |  81 +++++++++++
 .../hadoop/hive/ql/exec/TestOperators.java      |   3 +-
 .../ql/io/TestHiveBinarySearchRecordReader.java |   2 +-
 .../hadoop/hive/ql/io/TestIOContextMap.java     | 133 +++++++++++++++++++
 8 files changed, 223 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
index 65301c0..ae35766 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
@@ -25,6 +25,7 @@ import java.util.concurrent.Future;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.IOContext;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -61,7 +62,7 @@ public class FilterOperator extends Operator<FilterDesc> implements
       }
 
       conditionInspector = null;
-      ioContext = IOContext.get(hconf);
+      ioContext = IOContextMap.get(hconf);
     } catch (Throwable e) {
       throw new HiveException(e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
index 13d0650..fc5abfe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
@@ -22,8 +22,8 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.FetchOperator;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.IOContext;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
 import org.apache.hadoop.mapred.JobConf;
 
@@ -63,11 +63,11 @@ public class ExecMapperContext {
 
   public ExecMapperContext(JobConf jc) {
     this.jc = jc;
-    ioCxt = IOContext.get(jc);
+    ioCxt = IOContextMap.get(jc);
   }
 
   public void clear() {
-    IOContext.clear();
+    IOContextMap.clear();
     ioCxt = null;
   }
 
@@ -151,8 +151,4 @@ public class ExecMapperContext {
   public IOContext getIoCxt() {
     return ioCxt;
   }
-
-  public void setIoCxt(IOContext ioCxt) {
-    this.ioCxt = ioCxt;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 9b3f8ec..738ca9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -162,7 +162,7 @@ public abstract class HiveContextAwareRecordReader<K, V> implements RecordReader
   }
 
   public IOContext getIOContext() {
-    return IOContext.get(jobConf);
+    return IOContextMap.get(jobConf);
   }
 
   private void initIOContext(long startPos, boolean isBlockPointer,

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
index ebad0a6..019db8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
@@ -18,13 +18,7 @@
 
 package org.apache.hadoop.hive.ql.io;
 
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 
 /**
  * IOContext basically contains the position information of the current
@@ -35,43 +29,6 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
  * nextBlockStart refers the end of current row and beginning of next row.
  */
 public class IOContext {
-
-  /**
-   * Spark uses this thread local
-   */
-  private static final ThreadLocal<IOContext> threadLocal = new ThreadLocal<IOContext>(){
-    @Override
-    protected IOContext initialValue() { return new IOContext(); }
- };
-
-  private static IOContext get() {
-    return IOContext.threadLocal.get();
-  }
-
-  /**
-   * Tez and MR use this map but are single threaded per JVM thus no synchronization is required.
-   */
-  private static final Map<String, IOContext> inputNameIOContextMap = new HashMap<String, IOContext>();
-
-
-  public static IOContext get(Configuration conf) {
-    if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
-      return get();
-    }
-    String inputName = conf.get(Utilities.INPUT_NAME);
-    if (!inputNameIOContextMap.containsKey(inputName)) {
-      IOContext ioContext = new IOContext();
-      inputNameIOContextMap.put(inputName, ioContext);
-    }
-
-    return inputNameIOContextMap.get(inputName);
-  }
-
-  public static void clear() {
-    IOContext.threadLocal.remove();
-    inputNameIOContextMap.clear();
-  }
-
   private long currentBlockStart;
   private long nextBlockStart;
   private long currentRow;
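
The lookup removed above assumed MR and Tez were single threaded per JVM, so its containsKey/put pair on a plain HashMap was left unsynchronized; as the new class comment below notes, Tez actually runs two or more threads per task, and under concurrency that check-then-put can hand different callers different contexts for the same input name. A minimal sketch of the race, using hypothetical names rather than anything from this commit:

    import java.util.HashMap;
    import java.util.Map;

    public class CheckThenPutRace {
      private static final Map<String, Object> map = new HashMap<String, Object>();

      // Mirrors the removed lookup: containsKey/put is not atomic, so two
      // threads can both observe "absent" and each install an entry; the
      // second put overwrites the first, orphaning one caller's object.
      static Object get(String inputName) {
        if (!map.containsKey(inputName)) {
          map.put(inputName, new Object());
        }
        return map.get(inputName);
      }

      public static void main(String[] args) throws Exception {
        Runnable r = new Runnable() {
          public void run() {
            System.out.println(get("Input 0")); // identity can differ per run
          }
        };
        Thread t1 = new Thread(r), t2 = new Thread(r);
        t1.start(); t2.start();
        t1.join(); t2.join();
      }
    }

The replacement IOContextMap below closes this window with ConcurrentHashMap.putIfAbsent, so a thread that loses the race adopts the winner's context instead of keeping its own.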

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java
new file mode 100644
index 0000000..342c526
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+
+/**
+ * NOTE: before LLAP branch merge, there's no LLAP code here.
+ * There used to be a global static map of IOContext-s inside IOContext (Hive style!).
+ * Unfortunately, due to a variety of factors, this is now a giant fustercluck.
+ * 1) Spark doesn't apparently care about multiple inputs, but has multiple threads, so one
+ *    threadlocal IOContext was added for it.
+ * 2) LLAP has lots of tasks in the same process so globals no longer cut it either.
+ * 3) However, Tez runs 2+ threads for one task (e.g. TezTaskEventRouter and TezChild), and these
+ *    surprisingly enough need the same context. Tez, in its infinite wisdom, doesn't allow them
+ *    to communicate in any way nor provide any shared context.
+ * So we are going to...
+ * 1) Keep the good ol' global map for MR and Tez. Hive style!
+ * 2) Keep the threadlocal for Spark. Hive style!
+ * 3) Create inheritable (TADA!) threadlocal with attemptId, only set in LLAP; that will propagate
+ *    to all the little Tez threads, and we will keep a map per attempt. Hive style squared!
+ */
+public class IOContextMap {
+  public static final String DEFAULT_CONTEXT = "";
+  private static final Log LOG = LogFactory.getLog(IOContextMap.class);
+
+  /** Used for Tez and MR */
+  private static final ConcurrentHashMap<String, IOContext> globalMap =
+      new ConcurrentHashMap<String, IOContext>();
+
+  /** Used for Spark */
+  private static final ThreadLocal<IOContext> sparkThreadLocal = new ThreadLocal<IOContext>(){
+    @Override
+    protected IOContext initialValue() { return new IOContext(); }
+  };
+
+  public static IOContext get(Configuration conf) {
+    if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+      return sparkThreadLocal.get();
+    }
+    String inputName = conf.get(Utilities.INPUT_NAME);
+    if (inputName == null) {
+      inputName = DEFAULT_CONTEXT;
+    }
+    ConcurrentHashMap<String, IOContext> map;
+    map = globalMap;
+
+    IOContext ioContext = map.get(inputName);
+    if (ioContext != null) return ioContext;
+    ioContext = new IOContext();
+    IOContext oldContext = map.putIfAbsent(inputName, ioContext);
+    return (oldContext == null) ? ioContext : oldContext;
+  }
+
+  public static void clear() {
+    sparkThreadLocal.remove();
+    globalMap.clear();
+  }
+}
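
A minimal usage sketch of the new map (class name hypothetical, not part of the commit; it assumes only the classes added or changed above): callers now resolve the per-input IOContext through IOContextMap, and the execution engine setting decides whether the lookup is keyed by input name or by thread:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.io.IOContext;
    import org.apache.hadoop.hive.ql.io.IOContextMap;

    public class IOContextMapSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // MR/Tez: contexts live in the global ConcurrentHashMap keyed by
        // input name, so repeated lookups for one input share an object.
        conf.set(Utilities.INPUT_NAME, "Input 0");
        IOContext first = IOContextMap.get(conf);
        System.out.println(first == IOContextMap.get(conf)); // true

        // Spark: the engine check short-circuits to a thread-local, so the
        // input name is ignored and each thread owns exactly one context.
        conf.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "spark");
        conf.set(Utilities.INPUT_NAME, "Other input");
        System.out.println(IOContextMap.get(conf) == IOContextMap.get(conf)); // true on one thread

        // clear() drops the global map and this thread's Spark-local context.
        IOContextMap.clear();
      }
    }

TestIOContextMap below asserts the same two properties across real threads, using latches to force the lookups to race.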

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index 62057d8..c3a36c0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.io.IOContext;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
 import org.apache.hadoop.hive.ql.plan.CollectDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -272,7 +273,7 @@ public class TestOperators extends TestCase {
       JobConf hconf = new JobConf(TestOperators.class);
       HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
           "hdfs:///testDir/testFile");
-      IOContext.get(hconf).setInputPath(
+      IOContextMap.get(hconf).setInputPath(
           new Path("hdfs:///testDir/testFile"));
 
       // initialize pathToAliases

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
index 7a1748c..9dc4f5b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
@@ -116,7 +116,7 @@ public class TestHiveBinarySearchRecordReader extends TestCase {
 
   private void resetIOContext() {
     conf.set(Utilities.INPUT_NAME, "TestHiveBinarySearchRecordReader");
-    ioContext = IOContext.get(conf);
+    ioContext = IOContextMap.get(conf);
     ioContext.setUseSorted(false);
     ioContext.setBinarySearching(false);
     ioContext.setEndBinarySearch(false);

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java
new file mode 100644
index 0000000..4469353
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import static org.junit.Assert.*;
+
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+public class TestIOContextMap {
+
+  private void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cdlOut) {
+    cdlIn.countDown();
+    try {
+      cdlOut.await();
+    } catch (InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Test
+  public void testMRTezGlobalMap() throws Exception {
+    // Tests concurrent modification, and that results are the same per input across threads
+    // but different between inputs.
+    final int THREAD_COUNT = 2, ITER_COUNT = 1000;
+    final AtomicInteger countdown = new AtomicInteger(ITER_COUNT);
+    final CountDownLatch phase1End = new CountDownLatch(THREAD_COUNT);
+    final IOContext[] results = new IOContext[ITER_COUNT];
+    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
+    final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
+
+    @SuppressWarnings("unchecked")
+    FutureTask<Void>[] tasks = new FutureTask[THREAD_COUNT];
+    for (int i = 0; i < tasks.length; ++i) {
+      tasks[i] = new FutureTask<Void>(new Callable<Void>() {
+        public Void call() throws Exception {
+          Configuration conf = new Configuration();
+          syncThreadStart(cdlIn, cdlOut);
+          // Phase 1 - create objects.
+          while (true) {
+            int nextIx = countdown.decrementAndGet();
+            if (nextIx < 0) break;
+            conf.set(Utilities.INPUT_NAME, "Input " + nextIx);
+            results[nextIx] = IOContextMap.get(conf);
+            if (nextIx == 0) break;
+          }
+          phase1End.countDown();
+          phase1End.await();
+          // Phase 2 - verify we get the expected objects created by all threads.
+          for (int i = 0; i < ITER_COUNT; ++i) {
+            conf.set(Utilities.INPUT_NAME, "Input " + i);
+            IOContext ctx = IOContextMap.get(conf);
+            assertSame(results[i], ctx);
+          }
+          return null;
+        }
+      });
+      executor.execute(tasks[i]);
+    }
+
+    cdlIn.await(); // Wait for all threads to be ready.
+    cdlOut.countDown(); // Release them at the same time.
+    for (int i = 0; i < tasks.length; ++i) {
+      tasks[i].get();
+    }
+    Set<IOContext> resultSet = Sets.newIdentityHashSet();
+    for (int i = 0; i < results.length; ++i) {
+      assertTrue(resultSet.add(results[i])); // All the objects must be different.
+    }
+  }
+
+  @Test
+  public void testSparkThreadLocal() throws Exception {
+    // Test that input name does not change IOContext returned, and that each thread gets its own.
+    final Configuration conf1 = new Configuration();
+    conf1.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "spark");
+    final Configuration conf2 = new Configuration(conf1);
+    conf2.set(Utilities.INPUT_NAME, "Other input");
+    final int THREAD_COUNT = 2;
+    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
+    final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
+    @SuppressWarnings("unchecked")
+    FutureTask<IOContext>[] tasks = new FutureTask[THREAD_COUNT];
+    for (int i = 0; i < tasks.length; ++i) {
+      tasks[i] = new FutureTask<IOContext>(new Callable<IOContext>() {
+        public IOContext call() throws Exception {
+          syncThreadStart(cdlIn, cdlOut);
+          IOContext c1 = IOContextMap.get(conf1), c2 = IOContextMap.get(conf2);
+          assertSame(c1, c2);
+          return c1;
+        }
+      });
+      executor.execute(tasks[i]);
+    }
+
+    cdlIn.await(); // Wait for all threads to be ready.
+    cdlOut.countDown(); // Release them at the same time.
+    Set<IOContext> results = Sets.newIdentityHashSet();
+    for (int i = 0; i < tasks.length; ++i) {
+      assertTrue(results.add(tasks[i].get())); // All the objects must be different.
+    }
+  }
+
+}


[41/50] [abbrv] hive git commit: HIVE-11158 Add tests for HPL/SQL (Dmitry Tolpeko via gates)

Posted by xu...@apache.org.
HIVE-11158 Add tests for HPL/SQL (Dmitry Tolpeko via gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7338d8e1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7338d8e1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7338d8e1

Branch: refs/heads/beeline-cli
Commit: 7338d8e11983bfe7a63aadfd82b64adef765cb67
Parents: 240097b
Author: Alan Gates <ga...@hortonworks.com>
Authored: Wed Jul 15 17:09:12 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Wed Jul 15 17:09:12 2015 -0700

----------------------------------------------------------------------
 hplsql/pom.xml                                  |   9 +-
 .../main/java/org/apache/hive/hplsql/Exec.java  |  11 +-
 .../org/apache/hive/hplsql/TestHplsqlLocal.java | 330 +++++++++++++++++++
 hplsql/src/test/queries/local/add.sql           |   2 +
 hplsql/src/test/queries/local/assign.sql        |   7 +
 hplsql/src/test/queries/local/bool_expr.sql     |  47 +++
 hplsql/src/test/queries/local/break.sql         |  10 +
 hplsql/src/test/queries/local/case.sql          |  35 ++
 hplsql/src/test/queries/local/cast.sql          |   4 +
 hplsql/src/test/queries/local/char.sql          |   1 +
 hplsql/src/test/queries/local/coalesce.sql      |   4 +
 hplsql/src/test/queries/local/concat.sql        |   2 +
 .../src/test/queries/local/create_function.sql  |  11 +
 .../src/test/queries/local/create_function2.sql |  11 +
 .../src/test/queries/local/create_procedure.sql |   9 +
 hplsql/src/test/queries/local/date.sql          |   5 +
 hplsql/src/test/queries/local/dbms_output.sql   |   6 +
 hplsql/src/test/queries/local/declare.sql       |  16 +
 .../test/queries/local/declare_condition.sql    |   8 +
 .../test/queries/local/declare_condition2.sql   |  10 +
 hplsql/src/test/queries/local/decode.sql        |  10 +
 hplsql/src/test/queries/local/equal.sql         |  55 ++++
 hplsql/src/test/queries/local/exception.sql     |  14 +
 hplsql/src/test/queries/local/exception2.sql    |  10 +
 hplsql/src/test/queries/local/exception3.sql    |   5 +
 hplsql/src/test/queries/local/exception4.sql    |   7 +
 hplsql/src/test/queries/local/exception5.sql    |  10 +
 hplsql/src/test/queries/local/exit.sql          |  31 ++
 hplsql/src/test/queries/local/expr.sql          |  21 ++
 hplsql/src/test/queries/local/for_range.sql     |  20 ++
 hplsql/src/test/queries/local/if.sql            |  68 ++++
 hplsql/src/test/queries/local/instr.sql         |  49 +++
 hplsql/src/test/queries/local/interval.sql      |  15 +
 hplsql/src/test/queries/local/lang.sql          |  57 ++++
 hplsql/src/test/queries/local/leave.sql         |  33 ++
 hplsql/src/test/queries/local/len.sql           |   1 +
 hplsql/src/test/queries/local/length.sql        |   1 +
 hplsql/src/test/queries/local/lower.sql         |   1 +
 hplsql/src/test/queries/local/nvl.sql           |   4 +
 hplsql/src/test/queries/local/nvl2.sql          |   2 +
 hplsql/src/test/queries/local/print.sql         |   5 +
 hplsql/src/test/queries/local/return.sql        |   3 +
 hplsql/src/test/queries/local/seterror.sql      |  10 +
 hplsql/src/test/queries/local/sub.sql           |   1 +
 hplsql/src/test/queries/local/substr.sql        |   2 +
 hplsql/src/test/queries/local/substring.sql     |   8 +
 hplsql/src/test/queries/local/timestamp.sql     |   4 +
 hplsql/src/test/queries/local/timestamp_iso.sql |   2 +
 hplsql/src/test/queries/local/to_char.sql       |   1 +
 hplsql/src/test/queries/local/to_timestamp.sql  |   5 +
 hplsql/src/test/queries/local/trim.sql          |   1 +
 hplsql/src/test/queries/local/twopipes.sql      |   1 +
 hplsql/src/test/queries/local/upper.sql         |   1 +
 hplsql/src/test/queries/local/values_into.sql   |   6 +
 hplsql/src/test/queries/local/while.sql         |  20 ++
 hplsql/src/test/results/local/add.out.txt       |   2 +
 hplsql/src/test/results/local/assign.out.txt    |   8 +
 hplsql/src/test/results/local/bool_expr.out.txt |  32 ++
 hplsql/src/test/results/local/break.out.txt     |  29 ++
 hplsql/src/test/results/local/case.out.txt      |  12 +
 hplsql/src/test/results/local/cast.out.txt      |   8 +
 hplsql/src/test/results/local/char.out.txt      |   1 +
 hplsql/src/test/results/local/coalesce.out.txt  |   4 +
 hplsql/src/test/results/local/concat.out.txt    |   2 +
 .../test/results/local/create_function.out.txt  |   9 +
 .../test/results/local/create_function2.out.txt |  10 +
 .../test/results/local/create_procedure.out.txt |   8 +
 hplsql/src/test/results/local/date.out.txt      |   4 +
 .../src/test/results/local/dbms_output.out.txt  |   3 +
 hplsql/src/test/results/local/declare.out.txt   |  13 +
 .../results/local/declare_condition.out.txt     |   7 +
 .../results/local/declare_condition2.out.txt    |  12 +
 hplsql/src/test/results/local/decode.out.txt    |  13 +
 hplsql/src/test/results/local/equal.out.txt     |  48 +++
 hplsql/src/test/results/local/exception.out.txt |  13 +
 .../src/test/results/local/exception2.out.txt   |   5 +
 hplsql/src/test/results/local/exit.out.txt      |  42 +++
 hplsql/src/test/results/local/expr.out.txt      |  29 ++
 hplsql/src/test/results/local/for_range.out.txt |  65 ++++
 hplsql/src/test/results/local/if.out.txt        |  40 +++
 hplsql/src/test/results/local/instr.out.txt     |  33 ++
 hplsql/src/test/results/local/interval.out.txt  |  11 +
 hplsql/src/test/results/local/lang.out.txt      |  34 ++
 hplsql/src/test/results/local/leave.out.txt     |  42 +++
 hplsql/src/test/results/local/len.out.txt       |   1 +
 hplsql/src/test/results/local/length.out.txt    |   1 +
 hplsql/src/test/results/local/lower.out.txt     |   1 +
 hplsql/src/test/results/local/nvl.out.txt       |   4 +
 hplsql/src/test/results/local/nvl2.out.txt      |   2 +
 .../test/results/local/plhqlexception.out.txt   |   6 +
 .../test/results/local/plhqlexception1.out.txt  |  10 +
 .../test/results/local/plhqlexception2.out.txt  | 106 ++++++
 hplsql/src/test/results/local/print.out.txt     |   6 +
 hplsql/src/test/results/local/return.out.txt    |   3 +
 .../results/local/select_conversion.out.txt     |   9 +
 hplsql/src/test/results/local/seterror.out.txt  |   6 +
 hplsql/src/test/results/local/sub.out.txt       |   1 +
 hplsql/src/test/results/local/substr.out.txt    |   2 +
 hplsql/src/test/results/local/substring.out.txt |   8 +
 hplsql/src/test/results/local/timestamp.out.txt |   4 +
 .../test/results/local/timestamp_iso.out.txt    |   2 +
 hplsql/src/test/results/local/to_char.out.txt   |   1 +
 .../src/test/results/local/to_timestamp.out.txt |   4 +
 hplsql/src/test/results/local/trim.out.txt      |   1 +
 hplsql/src/test/results/local/twopipes.out.txt  |   1 +
 hplsql/src/test/results/local/upper.out.txt     |   1 +
 .../src/test/results/local/values_into.out.txt  |  11 +
 hplsql/src/test/results/local/while.out.txt     |  72 ++++
 108 files changed, 1821 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/pom.xml
----------------------------------------------------------------------
diff --git a/hplsql/pom.xml b/hplsql/pom.xml
index d096e90..fc1c527 100644
--- a/hplsql/pom.xml
+++ b/hplsql/pom.xml
@@ -58,7 +58,6 @@
       <artifactId>commons-logging</artifactId>
       <version>${commons-logging.version}</version>
     </dependency>
-
     <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
@@ -74,6 +73,12 @@
        <artifactId>antlr4-runtime</artifactId>
        <version>4.5</version>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>${junit.version}</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <profiles>
@@ -103,7 +108,7 @@
   
   <build>
     <plugins>
-      <plugin>
+     <plugin>
         <groupId>org.antlr</groupId>
           <artifactId>antlr4-maven-plugin</artifactId>
           <version>4.5</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
index 9ec8959..40fdc82 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
@@ -1644,7 +1644,16 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
    */
   @Override 
   public Integer visitLabel(HplsqlParser.LabelContext ctx) { 
-    exec.labels.push(ctx.L_ID().toString()); 
+    if (ctx.L_ID() != null) {
+      exec.labels.push(ctx.L_ID().toString());
+    }
+    else {
+      String label = ctx.L_LABEL().getText();
+      if (label.endsWith(":")) {
+        label = label.substring(0, label.length() - 1);
+      }
+      exec.labels.push(label);
+    }
     return 0;
   }
   

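The visitLabel change above lets a label arrive as either an L_ID token or an L_LABEL token whose text still carries the trailing colon; the new local tests exercise both spellings (<<lbl>> in exit.sql, lbl: in leave.sql). A minimal sketch of the same normalization as a standalone helper, assuming L_LABEL text ends with ':' while L_ID text does not:

    // Strip a trailing ':' so that "lbl:" and "lbl" push the same label name.
    private static String normalizeLabel(String token) {
      return token.endsWith(":") ? token.substring(0, token.length() - 1) : token;
    }

Pushing the normalized name means a later EXIT lbl or LEAVE lbl can match the label regardless of which syntax declared it.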
http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
----------------------------------------------------------------------
diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
new file mode 100644
index 0000000..ee2be66
--- /dev/null
+++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.hplsql;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.io.StringReader;
+import org.apache.commons.io.FileUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit tests for HPL/SQL (no Hive connection required)
+ */
+public class TestHplsqlLocal {
+
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+
+  @Test
+  public void testAdd() throws Exception {
+    run("add");
+  }
+
+  @Test
+  public void testAssign() throws Exception {
+    run("assign");
+  }
+
+  @Test
+  public void testBoolExpr() throws Exception {
+    run("bool_expr");
+  }
+
+  @Test
+  public void testBreak() throws Exception {
+    run("break");
+  }
+
+  @Test
+  public void testCase() throws Exception {
+    run("case");
+  }
+
+  @Test
+  public void testCast() throws Exception {
+    run("cast");
+  }
+
+  @Test
+  public void testChar() throws Exception {
+    run("char");
+  }
+
+  @Test
+  public void testCoalesce() throws Exception {
+    run("coalesce");
+  }
+
+  @Test
+  public void testConcat() throws Exception {
+    run("concat");
+  }
+
+  @Test
+  public void testCreateFunction() throws Exception {
+    run("create_function");
+  }
+
+  @Test
+  public void testCreateFunction2() throws Exception {
+    run("create_function2");
+  }
+
+  @Test
+  public void testCreateProcedure() throws Exception {
+    run("create_procedure");
+  }
+
+  @Test
+  public void testDate() throws Exception {
+    run("date");
+  }
+
+  @Test
+  public void testDbmsOutput() throws Exception {
+    run("dbms_output");
+  }
+
+  @Test
+  public void testDeclare() throws Exception {
+    run("declare");
+  }
+
+  @Test
+  public void testDeclareCondition() throws Exception {
+    run("declare_condition");
+  }
+
+  @Test
+  public void testDeclareCondition2() throws Exception {
+    run("declare_condition2");
+  }
+
+  @Test
+  public void testDecode() throws Exception {
+    run("decode");
+  }
+
+  @Test
+  public void testEqual() throws Exception {
+    run("equal");
+  }
+
+  @Test
+  public void testException() throws Exception {
+    run("exception");
+  }
+
+  @Test
+  public void testException2() throws Exception {
+    run("exception2");
+  }
+
+  @Test
+  public void testException3() throws Exception {
+    run("exception2");
+  }
+
+  @Test
+  public void testException4() throws Exception {
+    run("exception2");
+  }
+
+  @Test
+  public void testException5() throws Exception {
+    run("exception2");
+  }
+
+  @Test
+  public void testExit() throws Exception {
+    run("exit");
+  }
+
+  @Test
+  public void testExpr() throws Exception {
+    run("expr");
+  }
+
+  @Test
+  public void testForRange() throws Exception {
+    run("for_range");
+  }
+
+  @Test
+  public void testIf() throws Exception {
+    run("if");
+  }
+
+  @Test
+  public void testInstr() throws Exception {
+    run("instr");
+  }
+
+  @Test
+  public void testInterval() throws Exception {
+    run("interval");
+  }
+
+  @Test
+  public void testLang() throws Exception {
+    run("lang");
+  }
+
+  @Test
+  public void testLeave() throws Exception {
+    run("leave");
+  }
+
+  @Test
+  public void testLength() throws Exception {
+    run("length");
+  }
+
+  @Test
+  public void testLen() throws Exception {
+    run("len");
+  }
+
+  @Test
+  public void testLower() throws Exception {
+    run("lower");
+  }
+
+  @Test
+  public void testNvl() throws Exception {
+    run("nvl");
+  }
+
+  @Test
+  public void testNvl2() throws Exception {
+    run("nvl2");
+  }
+
+  @Test
+  public void testPrint() throws Exception {
+    run("print");
+  }
+
+  @Test
+  public void testReturn() throws Exception {
+    run("return");
+  }
+
+  @Test
+  public void testSetError() throws Exception {
+    run("seterror");
+  }
+
+  @Test
+  public void testSub() throws Exception {
+    run("sub");
+  }
+
+  @Test
+  public void testSubstring() throws Exception {
+    run("substring");
+  }
+
+  @Test
+  public void testSubstr() throws Exception {
+    run("substr");
+  }
+
+  @Test
+  public void testTimestampIso() throws Exception {
+    run("timestamp_iso");
+  }
+
+  @Test
+  public void testTimestamp() throws Exception {
+    run("timestamp");
+  }
+
+  @Test
+  public void testToChar() throws Exception {
+    run("to_char");
+  }
+
+  @Test
+  public void testToTimestamp() throws Exception {
+    run("to_timestamp");
+  }
+
+  @Test
+  public void testTrim() throws Exception {
+    run("trim");
+  }
+
+  @Test
+  public void testTwoPipes() throws Exception {
+    run("twopipes");
+  }
+
+  @Test
+  public void testUpper() throws Exception {
+    run("upper");
+  }
+
+  @Test
+  public void testValuesInto() throws Exception {
+    run("values_into");
+  }
+
+  @Test
+  public void testWhile() throws Exception {
+    run("while");
+  }
+
+  /**
+   * Run a test file
+   */
+  void run(String testFile) throws Exception {
+    System.setOut(new PrintStream(out));
+    Exec exec = new Exec();
+    String[] args = { "-f", "src/test/queries/local/" + testFile + ".sql", "-trace" };
+    exec.init(args);
+    Var result = exec.run();
+    if (result != null) {
+      System.out.println(result.toString());
+    }
+    String s = getTestOutput(out.toString()).trim();
+    FileUtils.writeStringToFile(new java.io.File("target/tmp/log/" + testFile + ".out.txt"), s);
+    String t = FileUtils.readFileToString(new java.io.File("src/test/results/local/" + testFile + ".out.txt"), "utf-8").trim();
+    System.setOut(null);
+    Assert.assertEquals(t, s); // expected output first, then actual
+  }
+
+  /**
+   * Get test output
+   */
+  String getTestOutput(String s) throws Exception {
+    StringBuilder sb = new StringBuilder();
+    BufferedReader reader = new BufferedReader(new StringReader(s));
+    String line = null;
+    while ((line = reader.readLine()) != null) {
+      if (!line.startsWith("log4j:")) {
+        sb.append(line);
+        sb.append("\n");
+      }
+    }
+    return sb.toString();
+  }
+}
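
The run() harness above also shows the minimal sequence for driving an HPL/SQL script programmatically: construct Exec, init() it with the same command-line flags the CLI accepts, then run(). A sketch of a standalone driver built on exactly the calls the test uses (the script path is a placeholder):

    public static void main(String[] args) throws Exception {
      Exec exec = new Exec();
      // Same flags the tests pass: -f <script file>, -trace for execution tracing.
      exec.init(new String[] { "-f", "src/test/queries/local/add.sql", "-trace" });
      Var result = exec.run();  // value of the last evaluated expression; may be null
      if (result != null) {
        System.out.println(result);
      }
    }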

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/add.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/add.sql b/hplsql/src/test/queries/local/add.sql
new file mode 100644
index 0000000..2861a5f
--- /dev/null
+++ b/hplsql/src/test/queries/local/add.sql
@@ -0,0 +1,2 @@
+DATE '2014-12-31' + 1;
+1 + DATE '2014-12-31';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/assign.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/assign.sql b/hplsql/src/test/queries/local/assign.sql
new file mode 100644
index 0000000..67e6893
--- /dev/null
+++ b/hplsql/src/test/queries/local/assign.sql
@@ -0,0 +1,7 @@
+code := 'A';
+status := 1;
+count = 0;
+
+SET code = 'A';
+SET status = 1, count = 0;
+SET (count, limit) = (0, 100);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/bool_expr.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/bool_expr.sql b/hplsql/src/test/queries/local/bool_expr.sql
new file mode 100644
index 0000000..098096f
--- /dev/null
+++ b/hplsql/src/test/queries/local/bool_expr.sql
@@ -0,0 +1,47 @@
+IF 1=1 THEN
+  PRINT 'Correct';
+ELSE 
+  PRINT 'FAILED';
+END IF;
+
+IF 1=1 OR 2=2 THEN
+  PRINT 'Correct';
+ELSE 
+  PRINT 'FAILED';
+END IF;
+
+IF (1=1 OR 2=2) THEN
+  PRINT 'Correct';
+ELSE 
+  PRINT 'FAILED';
+END IF; 
+
+IF (1=1 AND 2=2 AND 3=4) THEN
+  PRINT 'FAILED';
+ELSE 
+  PRINT 'Correct';
+END IF; 
+
+IF ((1=1) AND (2=2)) THEN
+  PRINT 'Correct';
+ELSE 
+  PRINT 'FAILED';
+END IF;
+
+IF (1=1 AND (2=2)) THEN
+  PRINT 'Correct';
+ELSE 
+  PRINT 'FAILED';
+END IF;
+
+IF ((1=1) AND 2=2 AND 3=3) THEN
+  PRINT 'Correct';
+ELSE 
+  PRINT 'FAILED';
+END IF;
+
+IF ((1=1 OR 2=2) AND 2=2 AND 3=3 AND (1=2 OR 2=3)) THEN
+  PRINT 'FAILED';
+ELSE 
+  PRINT 'Correct';
+END IF;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/break.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/break.sql b/hplsql/src/test/queries/local/break.sql
new file mode 100644
index 0000000..c53535d
--- /dev/null
+++ b/hplsql/src/test/queries/local/break.sql
@@ -0,0 +1,10 @@
+DECLARE count INT DEFAULT 3;
+WHILE 1=1 BEGIN
+  PRINT 'Start of while block';
+  PRINT count;
+  SET count = count - 1;
+  IF count = 0
+    BREAK;
+  PRINT 'End of while block';
+END
+PRINT 'End of script';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/case.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/case.sql b/hplsql/src/test/queries/local/case.sql
new file mode 100644
index 0000000..5bbdda9
--- /dev/null
+++ b/hplsql/src/test/queries/local/case.sql
@@ -0,0 +1,35 @@
+PRINT CASE 1 
+        WHEN 0 THEN 'FAILED'
+        WHEN 1 THEN 'Correct' 
+        WHEN 2 THEN 'FAILED'
+        ELSE 'FAILED'
+      END 
+
+PRINT CASE 3 
+        WHEN 0 THEN 'FAILED'
+        WHEN 1 THEN 'FAILED'
+        ELSE 'Correct'
+      END       
+      
+PRINT NVL2(CASE 3 
+        WHEN 0 THEN 'FAILED'
+        WHEN 1 THEN 'FAILED'
+      END, 'FAILED', 'Correct')  
+      
+PRINT CASE  
+        WHEN 1=0 THEN 'FAILED'
+        WHEN 1=1 THEN 'Correct' 
+        WHEN 1=2 THEN 'FAILED'
+        ELSE 'FAILED'
+      END 
+
+PRINT CASE  
+        WHEN 3=0 THEN 'FAILED'
+        WHEN 3=1 THEN 'FAILED'
+        ELSE 'Correct'
+      END       
+      
+PRINT NVL2(CASE  
+        WHEN 3=0 THEN 'FAILED'
+        WHEN 3=1 THEN 'FAILED'
+      END, 'FAILED', 'Correct') 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/cast.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/cast.sql b/hplsql/src/test/queries/local/cast.sql
new file mode 100644
index 0000000..3adab22
--- /dev/null
+++ b/hplsql/src/test/queries/local/cast.sql
@@ -0,0 +1,4 @@
+CAST('Abc' AS CHAR(1));
+CAST('Abc' AS VARCHAR(2));
+CAST('Abc' AS CHAR);
+CAST(TIMESTAMP '2015-03-12 10:58:34.111' AS CHAR(10))

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/char.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/char.sql b/hplsql/src/test/queries/local/char.sql
new file mode 100644
index 0000000..2a4f779
--- /dev/null
+++ b/hplsql/src/test/queries/local/char.sql
@@ -0,0 +1 @@
+CHAR(1000)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/coalesce.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/coalesce.sql b/hplsql/src/test/queries/local/coalesce.sql
new file mode 100644
index 0000000..4b65d58
--- /dev/null
+++ b/hplsql/src/test/queries/local/coalesce.sql
@@ -0,0 +1,4 @@
+COALESCE('First non-null', 1);
+COALESCE(NULL, 'First non-null');
+COALESCE(NULL, 'First non-null', 1);
+COALESCE(NULL, NULL, 'First non-null', 1);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/concat.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/concat.sql b/hplsql/src/test/queries/local/concat.sql
new file mode 100644
index 0000000..b7769bb
--- /dev/null
+++ b/hplsql/src/test/queries/local/concat.sql
@@ -0,0 +1,2 @@
+CONCAT('a', 'b', NULL, 'c'); 
+NVL(CONCAT(NULL, NULL, NULL), 'NULL Value'); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/create_function.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/create_function.sql b/hplsql/src/test/queries/local/create_function.sql
new file mode 100644
index 0000000..96bf290
--- /dev/null
+++ b/hplsql/src/test/queries/local/create_function.sql
@@ -0,0 +1,11 @@
+CREATE FUNCTION hello()
+ RETURNS STRING
+BEGIN
+ PRINT 'Start';
+ RETURN 'Hello, world';
+ PRINT 'Must not be printed';
+END;
+ 
+-- Call the function
+PRINT hello() || '!';
+PRINT 'End of script';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/create_function2.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/create_function2.sql b/hplsql/src/test/queries/local/create_function2.sql
new file mode 100644
index 0000000..744ea9e
--- /dev/null
+++ b/hplsql/src/test/queries/local/create_function2.sql
@@ -0,0 +1,11 @@
+CREATE FUNCTION hello2(text STRING)
+  RETURNS STRING
+BEGIN
+  PRINT 'Start';
+  RETURN 'Hello, ' || text || '!';
+  PRINT 'Must not be printed';
+END;
+ 
+-- Call the function
+PRINT hello2('wor' || 'ld');
+PRINT 'End of script';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/create_procedure.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/create_procedure.sql b/hplsql/src/test/queries/local/create_procedure.sql
new file mode 100644
index 0000000..28088a2
--- /dev/null
+++ b/hplsql/src/test/queries/local/create_procedure.sql
@@ -0,0 +1,9 @@
+CREATE PROCEDURE set_message(IN name STRING, OUT result STRING)
+BEGIN
+  SET result = 'Hello, ' || name || '!';
+END;
+ 
+DECLARE str STRING;
+CALL set_message('world', str);
+PRINT str;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/date.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/date.sql b/hplsql/src/test/queries/local/date.sql
new file mode 100644
index 0000000..2ef4743
--- /dev/null
+++ b/hplsql/src/test/queries/local/date.sql
@@ -0,0 +1,5 @@
+DATE '2014-12-20'
+
+DATE('2015-03-12');
+DATE('2015' || '-03-' || '12');
+DATE(TIMESTAMP '2015-03-12 10:58:34.111');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/dbms_output.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/dbms_output.sql b/hplsql/src/test/queries/local/dbms_output.sql
new file mode 100644
index 0000000..37d7313
--- /dev/null
+++ b/hplsql/src/test/queries/local/dbms_output.sql
@@ -0,0 +1,6 @@
+DECLARE
+  str VARCHAR(200) DEFAULT 'Hello, world!';
+BEGIN
+  DBMS_OUTPUT.PUT_LINE('Hello, world!');
+  DBMS_OUTPUT.PUT_LINE(str);
+END;

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/declare.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/declare.sql b/hplsql/src/test/queries/local/declare.sql
new file mode 100644
index 0000000..fd02da9
--- /dev/null
+++ b/hplsql/src/test/queries/local/declare.sql
@@ -0,0 +1,16 @@
+DECLARE
+  code CHAR(10);
+  status INT := 1;
+  count SMALLINT = 0;
+  limit INT DEFAULT 100;  
+  f UTL_FILE.FILE_TYPE;
+BEGIN
+  status := 2;
+END;
+  
+DECLARE code CHAR(10);
+DECLARE status, status2 INT DEFAULT 1;
+DECLARE count SMALLINT, limit INT DEFAULT 100;  
+
+DECLARE dt DATE DEFAULT '2015-05-13';
+DECLARE ts TIMESTAMP DEFAULT '2015-05-13 11:10:01';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/declare_condition.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/declare_condition.sql b/hplsql/src/test/queries/local/declare_condition.sql
new file mode 100644
index 0000000..8739499
--- /dev/null
+++ b/hplsql/src/test/queries/local/declare_condition.sql
@@ -0,0 +1,8 @@
+DECLARE cnt_condition CONDITION;
+DECLARE EXIT HANDLER FOR cnt_condition
+  PRINT 'Condition raised';  
+IF 1 <> 2 THEN
+  SIGNAL cnt_condition;
+END IF;
+PRINT 'Must not be printed 1';
+PRINT 'Must not be printed 2';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/declare_condition2.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/declare_condition2.sql b/hplsql/src/test/queries/local/declare_condition2.sql
new file mode 100644
index 0000000..d6a6461
--- /dev/null
+++ b/hplsql/src/test/queries/local/declare_condition2.sql
@@ -0,0 +1,10 @@
+DECLARE cnt_condition CONDITION;
+DECLARE CONTINUE HANDLER FOR cnt_condition
+  PRINT 'Wrong condition';  
+DECLARE CONTINUE HANDLER FOR cnt_condition2
+  PRINT 'Condition raised';  
+IF 1 <> 2 THEN
+  SIGNAL cnt_condition2;
+END IF;
+PRINT 'Executed 1';
+PRINT 'Executed 2';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/decode.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/decode.sql b/hplsql/src/test/queries/local/decode.sql
new file mode 100644
index 0000000..a9f7c0c
--- /dev/null
+++ b/hplsql/src/test/queries/local/decode.sql
@@ -0,0 +1,10 @@
+DECLARE var1 INT DEFAULT 3;
+PRINT DECODE (var1, 1, 'A', 2, 'B', 3, 'C');
+PRINT DECODE (var1, 1, 'A', 2, 'B', 'C');
+
+SET var1 := 1;
+PRINT DECODE (var1, 1, 'A', 2, 'B', 3, 'C');
+
+SET var1 := NULL;
+PRINT DECODE (var1, 1, 'A', 2, 'B', NULL, 'C');
+PRINT DECODE (var1, 1, 'A', 2, 'B', 'C');

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/equal.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/equal.sql b/hplsql/src/test/queries/local/equal.sql
new file mode 100644
index 0000000..0de2801
--- /dev/null
+++ b/hplsql/src/test/queries/local/equal.sql
@@ -0,0 +1,55 @@
+PRINT 'Case 1 = 1';
+IF 1 = 1 THEN
+  PRINT 'Equal - Correct';
+ELSE
+  PRINT 'Not equal - Incorrect';
+END IF;
+
+PRINT 'Case 1 == 1';
+IF 1 == 1 THEN
+  PRINT 'Equal - Correct';
+ELSE
+  PRINT 'Not equal - Incorrect';
+END IF;
+
+PRINT 'Case 1 <> 3';
+IF 1 <> 3 THEN
+  PRINT 'Not equal - Correct';
+ELSE
+  PRINT 'Equal - Incorrect';
+END IF;
+
+PRINT 'Case 1 != 3';
+IF 1 != 3 THEN
+  PRINT 'Not equal - Correct';
+ELSE
+  PRINT 'Equal - Incorrect';
+END IF;
+
+PRINT 'Case 3 > 1';
+IF 3 > 1 THEN
+  PRINT 'Greater - Correct';
+ELSE
+  PRINT 'Greater - Incorrect';
+END IF;
+
+PRINT 'Case 1 < 3';
+IF 1 < 3 THEN
+  PRINT 'Less - Correct';
+ELSE
+  PRINT 'Less - Incorrect';
+END IF;
+
+PRINT 'Case 3 >= 1';
+IF 3 >= 1 THEN
+  PRINT 'Greater or equal - Correct';
+ELSE
+  PRINT 'Greater or equal - Incorrect';
+END IF;
+
+PRINT 'Case 1 <= 3';
+IF 1 <= 3 THEN
+  PRINT 'Less or equal - Correct';
+ELSE
+  PRINT 'Less or equal - Incorrect';
+END IF;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/exception.sql b/hplsql/src/test/queries/local/exception.sql
new file mode 100644
index 0000000..7ce7377
--- /dev/null
+++ b/hplsql/src/test/queries/local/exception.sql
@@ -0,0 +1,14 @@
+BEGIN
+  PRINT 'Correct';
+  WHILE 1=1 THEN
+    FETCH cur INTO v;
+    PRINT 'Incorrect - unreachable code, unknown cursor name, exception must be raised';
+  END WHILE;
+EXCEPTION WHEN OTHERS THEN
+  PRINT 'Correct';
+  PRINT 'Correct';
+  PRINT 'Correct - Exception raised';   
+  WHEN NO_DATA_FOUND THEN
+  PRINT 'Correct';
+END 
+

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception2.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/exception2.sql b/hplsql/src/test/queries/local/exception2.sql
new file mode 100644
index 0000000..3394da8
--- /dev/null
+++ b/hplsql/src/test/queries/local/exception2.sql
@@ -0,0 +1,10 @@
+DECLARE 
+  v VARCHAR(200);
+BEGIN
+  OPEN cur FOR 'SELECT c1 FROM t1';
+  FETCH cur INTO v;
+  CLOSE cur;
+EXCEPTION WHEN OTHERS THEN
+  DBMS_OUTPUT.PUT_LINE('Error');
+END 
+

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception3.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/exception3.sql b/hplsql/src/test/queries/local/exception3.sql
new file mode 100644
index 0000000..a12b853
--- /dev/null
+++ b/hplsql/src/test/queries/local/exception3.sql
@@ -0,0 +1,5 @@
+PRINT 'Correct';
+WHILE 1=1 THEN
+FETCH cur INTO v;
+PRINT 'Incorrect - unreachable code, unknown cursor name, exception must be raised';
+END WHILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception4.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/exception4.sql b/hplsql/src/test/queries/local/exception4.sql
new file mode 100644
index 0000000..38d89b5
--- /dev/null
+++ b/hplsql/src/test/queries/local/exception4.sql
@@ -0,0 +1,7 @@
+PRINT 'Correct';
+DECLARE EXIT HANDLER FOR SQLEXCEPTION
+  PRINT 'Correct - Exception raised';    
+WHILE 1=1 THEN
+FETCH cur INTO v;
+PRINT 'Incorrect - unreachable code, unknown cursor name, exception must be raised';
+END WHILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception5.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/exception5.sql b/hplsql/src/test/queries/local/exception5.sql
new file mode 100644
index 0000000..6232984
--- /dev/null
+++ b/hplsql/src/test/queries/local/exception5.sql
@@ -0,0 +1,10 @@
+DECLARE cnt INT := 0;
+PRINT 'Correct';
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+  PRINT 'Correct - Exception raised';    
+WHILE cnt < 10 THEN
+FETCH cur INTO v;
+PRINT cnt;
+PRINT 'Correct - exception handled';
+SET cnt = cnt + 1;
+END WHILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exit.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/exit.sql b/hplsql/src/test/queries/local/exit.sql
new file mode 100644
index 0000000..d0e432b
--- /dev/null
+++ b/hplsql/src/test/queries/local/exit.sql
@@ -0,0 +1,31 @@
+DECLARE count INT DEFAULT 3;
+
+WHILE 1=1 LOOP
+  PRINT 'Start of while block';
+  PRINT count;
+  count := count - 1;
+  EXIT WHEN count = 0;
+  PRINT 'End of while block';
+END LOOP;
+
+count := 3;
+
+<<lbl>>
+WHILE 1=1 LOOP
+  PRINT 'Start of outer while block';
+  
+  WHILE 1=1 LOOP
+    PRINT 'Start of 1st inner while block';
+    EXIT;
+    PRINT 'End of 1st inner while block (NEVER SHOWN)';
+  END LOOP;
+  
+  <<lbl2>>
+  WHILE 1=1 LOOP
+    PRINT 'Start of 2nd inner while block';
+    EXIT lbl;
+    PRINT 'End of 2nd inner while block (NEVER SHOWN)';
+  END LOOP;
+  PRINT 'End of outer while block (NEVER SHOWN)';
+END LOOP;
+PRINT 'End of script';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/expr.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/expr.sql b/hplsql/src/test/queries/local/expr.sql
new file mode 100644
index 0000000..33388a2
--- /dev/null
+++ b/hplsql/src/test/queries/local/expr.sql
@@ -0,0 +1,21 @@
+PRINT 'a' || 'b';
+PRINT 'a' || 1 || 'b';
+PRINT 1 || 'a' || 'b';
+PRINT 'a' || null || 'b';
+PRINT null || 'a' || 'b';
+PRINT null || null;
+
+DECLARE c INT;
+
+PRINT 'Integer increment'; 
+c := 3;
+c := c + 1;
+PRINT c;
+
+PRINT 'Integer decrement'; 
+c := 3;
+c := c - 1;
+PRINT c; 
+
+PRINT NVL(null - 3, 'Correct');
+PRINT NVL(null + 3, 'Correct');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/for_range.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/for_range.sql b/hplsql/src/test/queries/local/for_range.sql
new file mode 100644
index 0000000..b7af115
--- /dev/null
+++ b/hplsql/src/test/queries/local/for_range.sql
@@ -0,0 +1,20 @@
+DECLARE i INT = 3;
+PRINT i;
+
+FOR i IN 1..10 LOOP
+  PRINT i;
+END LOOP;
+
+PRINT i;
+
+FOR i IN REVERSE 10..1 LOOP
+  PRINT i;
+END LOOP;
+
+PRINT i;
+
+FOR i IN 1..10 BY 2 LOOP
+  PRINT i;
+END LOOP;
+
+PRINT i;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/if.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/if.sql b/hplsql/src/test/queries/local/if.sql
new file mode 100644
index 0000000..2de3045
--- /dev/null
+++ b/hplsql/src/test/queries/local/if.sql
@@ -0,0 +1,68 @@
+DECLARE state VARCHAR;
+DECLARE count INT;
+
+SET state = 'CA';
+SET count = 1;
+
+/*IF count = 1 THEN
+  PRINT 'True block - Correct';
+END IF;*/
+
+IF state = 'CA' THEN
+  PRINT 'True block - Correct';
+ELSE
+  PRINT 'False block - Incorrect';
+END IF;
+
+IF state = 'MA' THEN
+  PRINT 'True block - Incorrect';
+ELSE
+  PRINT 'False block - Correct';
+END IF;
+
+IF count = 4 THEN
+  PRINT 'True block - Incorrect';  
+ELSIF count = 3 THEN
+  PRINT 'True block - Incorrect';  
+ELSIF count = 2 THEN
+  PRINT 'True block - Incorrect';  
+ELSE
+  PRINT 'False block - Correct'; 
+END IF;
+
+IF count = 3 THEN
+  PRINT 'True block - Incorrect';  
+ELSIF count = 2 THEN
+  PRINT 'True block - Incorrect';  
+ELSIF count = 1 THEN
+  PRINT 'True block - Correct';  
+ELSE
+  PRINT 'False block - Incorrect'; 
+END IF;
+
+PRINT 'IS NOT NULL AND BETWEEN';
+IF 1 IS NOT NULL AND 1 BETWEEN 0 AND 100 THEN
+  PRINT 'True block - Correct';  
+ELSE
+  PRINT 'False block - Incorrect'; 
+END IF;
+
+PRINT 'Transact-SQL - Single statement';
+
+IF state = 'CA'
+  PRINT 'True block - Correct';  
+ELSE 
+  PRINT 'False block - Incorrect'; 
+
+PRINT 'Transact-SQL - BEGIN-END block';
+  
+IF state = 'CA'
+BEGIN
+  PRINT 'True block - Correct'; 
+  PRINT 'True block - Correct'; 
+END
+ELSE 
+BEGIN
+  PRINT 'False block - Incorrect'; 
+  PRINT 'False block - Incorrect'; 
+END  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/instr.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/instr.sql b/hplsql/src/test/queries/local/instr.sql
new file mode 100644
index 0000000..9cd8dca
--- /dev/null
+++ b/hplsql/src/test/queries/local/instr.sql
@@ -0,0 +1,49 @@
+IF INSTR('abc', 'b') = 2 THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF;
+
+IF INSTR('abcabc', 'b', 3) = 5 THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF;
+
+IF INSTR('abcabcabc', 'b', 3, 2) = 8 THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF; 
+
+IF INSTR('abcabcabc', 'b', -3) = 5 THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF; 
+
+IF INSTR('abcabcabc', 'b', -3, 2) = 2 THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF;
+
+DECLARE c STRING;
+
+IF INSTR(c, 'b') IS NULL THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF;
+
+IF INSTR(NULL, 'b') IS NULL THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF;
+
+IF INSTR('', 'b') = 0 THEN
+  PRINT 'Correct';
+ELSE
+  PRINT 'Failed';
+END IF;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/interval.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/interval.sql b/hplsql/src/test/queries/local/interval.sql
new file mode 100644
index 0000000..7962f2d
--- /dev/null
+++ b/hplsql/src/test/queries/local/interval.sql
@@ -0,0 +1,15 @@
+DATE '2015-03-12' + 1 DAY;
+TIMESTAMP '2015-03-12' + 1 DAY;
+TIMESTAMP '2015-03-12 10:10:10.000' + 1 MICROSECOND;
+
+DATE '2015-03-12' + NVL(NULL, 3) DAYS;
+TIMESTAMP '2015-03-12' + NVL(NULL, 3) DAYS;
+
+DATE '2015-03-12' - 1 DAY;
+TIMESTAMP '2015-03-12' - 1 DAY;
+TIMESTAMP '2015-03-12 10:10:10.000' - 1 MICROSECOND;
+
+DATE '2015-03-12' - NVL(NULL, 3) DAYS;
+TIMESTAMP '2015-03-12' - NVL(NULL, 3) DAYS;
+
+TIMESTAMP '2015-03-12' - 1 DAY - 1 MICROSECOND;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/lang.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/lang.sql b/hplsql/src/test/queries/local/lang.sql
new file mode 100644
index 0000000..56f8c33
--- /dev/null
+++ b/hplsql/src/test/queries/local/lang.sql
@@ -0,0 +1,57 @@
+-- Integer literals
++1;
+1;
+0;
+-1;
+
+-- Decimal literals
+1.0;
++1.0;
+-1.0;
+-- 1.;
+-- +1.;
+-- -1.;
+-- .1;
+-- +.1;
+-- -.1;
+
+-- Identifiers
+declare abc int;
+declare abc.abc int;
+declare abc . abc1 int;
+declare "abc" int;
+declare "abc".abc int;
+declare "abc"."abc" int;
+declare "abc" . "abc1" int;
+declare [abc] int;
+declare [abc].abc int;
+declare [abc].[abc] int;
+declare [abc] . [abc1] int;
+declare `abc` int;
+declare `abc`.abc int;
+declare `abc`.`abc` int;
+declare `abc` . `abc1` int;
+declare :new.abc int;
+declare @abc int;
+declare _abc int;
+declare #abc int;
+declare ##abc int;
+declare $abc int;
+declare abc_9 int;
+
+-- Operators and expressions
++1 + 1;                 -- 2
+1 + 1;                  -- 2
+1 + -1;                 -- 0
+-- 'a' + 'b';              -- ab    
+-- 'a''b' + 'c';           -- ab''c   
+-- 'a\'b' + 'c';           -- ab\'c   
+-- 1 + '1'                 -- 2        
+-- '1' + 1                 -- 2
+-- 1 + 'a'                 -- 1a     
+-- 'a' + 1                 -- a1
+
+-1 - 1;   -- -2
+-1 - -1;  -- 0
+
+ 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/leave.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/leave.sql b/hplsql/src/test/queries/local/leave.sql
new file mode 100644
index 0000000..a4fc0d5
--- /dev/null
+++ b/hplsql/src/test/queries/local/leave.sql
@@ -0,0 +1,33 @@
+DECLARE count INT DEFAULT 3;
+lbl:
+WHILE 1=1 DO
+  PRINT 'Start of while block';
+  PRINT count;
+  SET count = count - 1;
+  IF count = 0 THEN
+    LEAVE lbl;
+  END IF;
+  PRINT 'End of while block';
+END WHILE;
+
+SET count = 3;
+
+lbl3:
+WHILE 1=1 DO
+  PRINT 'Start of outer while block';
+  
+  lbl1:
+  WHILE 1=1 DO
+    PRINT 'Start of 1st inner while block';
+    LEAVE lbl1;
+    PRINT 'End of 1st inner while block (NEVER SHOWN)';
+  END WHILE;
+  
+  lbl2:
+  WHILE 1=1 DO
+    PRINT 'Start of 2nd inner while block';
+    LEAVE lbl3;
+    PRINT 'End of 2nd inner while block (NEVER SHOWN)';
+  END WHILE;
+  PRINT 'End of outer while block (NEVER SHOWN)';
+END WHILE;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/len.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/len.sql b/hplsql/src/test/queries/local/len.sql
new file mode 100644
index 0000000..9851c49
--- /dev/null
+++ b/hplsql/src/test/queries/local/len.sql
@@ -0,0 +1 @@
+LEN('Abc ');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/length.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/length.sql b/hplsql/src/test/queries/local/length.sql
new file mode 100644
index 0000000..42cf3cc
--- /dev/null
+++ b/hplsql/src/test/queries/local/length.sql
@@ -0,0 +1 @@
+LENGTH('Abc ');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/lower.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/lower.sql b/hplsql/src/test/queries/local/lower.sql
new file mode 100644
index 0000000..f29b0e9
--- /dev/null
+++ b/hplsql/src/test/queries/local/lower.sql
@@ -0,0 +1 @@
+LOWER('ABC');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/nvl.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/nvl.sql b/hplsql/src/test/queries/local/nvl.sql
new file mode 100644
index 0000000..1a843bc
--- /dev/null
+++ b/hplsql/src/test/queries/local/nvl.sql
@@ -0,0 +1,4 @@
+NVL('First non-null', 1);
+NVL(NULL, 'First non-null');
+NVL(NULL, 'First non-null', 1);
+NVL(NULL, NULL, 'First non-null', 1);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/nvl2.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/nvl2.sql b/hplsql/src/test/queries/local/nvl2.sql
new file mode 100644
index 0000000..70eeccb
--- /dev/null
+++ b/hplsql/src/test/queries/local/nvl2.sql
@@ -0,0 +1,2 @@
+NVL2('A', 'Correct', 'FAILED');
+NVL2(NULL, 'FAILED', 'Correct');

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/print.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/print.sql b/hplsql/src/test/queries/local/print.sql
new file mode 100644
index 0000000..095682b
--- /dev/null
+++ b/hplsql/src/test/queries/local/print.sql
@@ -0,0 +1,5 @@
+PRINT 1;
+PRINT 'abc';
+PRINT ('abc');
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/return.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/return.sql b/hplsql/src/test/queries/local/return.sql
new file mode 100644
index 0000000..c52e5c5
--- /dev/null
+++ b/hplsql/src/test/queries/local/return.sql
@@ -0,0 +1,3 @@
+PRINT 'Before return';
+RETURN; 
+PRINT 'Unreachable code';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/seterror.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/seterror.sql b/hplsql/src/test/queries/local/seterror.sql
new file mode 100644
index 0000000..4705677
--- /dev/null
+++ b/hplsql/src/test/queries/local/seterror.sql
@@ -0,0 +1,10 @@
+BEGIN
+SET plhql.onerror = SETERROR;
+ 
+HOST 'abcd';
+IF HOSTCODE <> 0 THEN
+  PRINT 'Correct';
+END IF;
+EXCEPTION WHEN OTHERS THEN
+  PRINT 'FAILED';
+END
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/sub.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/sub.sql b/hplsql/src/test/queries/local/sub.sql
new file mode 100644
index 0000000..a32bef7
--- /dev/null
+++ b/hplsql/src/test/queries/local/sub.sql
@@ -0,0 +1 @@
+DATE '2015-01-01' - 1
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/substr.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/substr.sql b/hplsql/src/test/queries/local/substr.sql
new file mode 100644
index 0000000..7785e39
--- /dev/null
+++ b/hplsql/src/test/queries/local/substr.sql
@@ -0,0 +1,2 @@
+SUBSTR('FAILED Correct', 8);
+SUBSTR('FAILED Correct FAILED', 8, 7); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/substring.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/substring.sql b/hplsql/src/test/queries/local/substring.sql
new file mode 100644
index 0000000..c94a191
--- /dev/null
+++ b/hplsql/src/test/queries/local/substring.sql
@@ -0,0 +1,8 @@
+SUBSTRING('FAILED Correct', 8);
+SUBSTRING('FAILED Correct FAILED', 8, 7); 
+
+SUBSTRING('FAILED Correct' FROM 8);
+SUBSTRING('FAILED Correct FAILED' FROM 8 FOR 7); 
+
+SUBSTRING('', 8);
+SUBSTRING(NULL, 8);

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/timestamp.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/timestamp.sql b/hplsql/src/test/queries/local/timestamp.sql
new file mode 100644
index 0000000..2971cea
--- /dev/null
+++ b/hplsql/src/test/queries/local/timestamp.sql
@@ -0,0 +1,4 @@
+TIMESTAMP '2015-03-03 11:39:31.123456';
+TIMESTAMP '2015-03-03 11:39:31.123';
+TIMESTAMP '2015-03-03 11:39:31';
+TIMESTAMP '2015-03-03-11.39.31.123';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/timestamp_iso.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/timestamp_iso.sql b/hplsql/src/test/queries/local/timestamp_iso.sql
new file mode 100644
index 0000000..9bcdfe0
--- /dev/null
+++ b/hplsql/src/test/queries/local/timestamp_iso.sql
@@ -0,0 +1,2 @@
+TIMESTAMP_ISO('2015-03-12');
+TIMESTAMP_ISO(DATE '2015-03-12');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/to_char.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/to_char.sql b/hplsql/src/test/queries/local/to_char.sql
new file mode 100644
index 0000000..339c7d6
--- /dev/null
+++ b/hplsql/src/test/queries/local/to_char.sql
@@ -0,0 +1 @@
+TO_CHAR(DATE '2015-04-02')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/to_timestamp.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/to_timestamp.sql b/hplsql/src/test/queries/local/to_timestamp.sql
new file mode 100644
index 0000000..c18f1f4
--- /dev/null
+++ b/hplsql/src/test/queries/local/to_timestamp.sql
@@ -0,0 +1,5 @@
+TO_TIMESTAMP('2015-04-02', 'YYYY-MM-DD');
+TO_TIMESTAMP('2015-04-02', 'yyyy-mm-dd');
+TO_TIMESTAMP('04/02/2015', 'mm/dd/yyyy');
+
+TO_TIMESTAMP('2015-04-02 13:51:31', 'YYYY-MM-DD HH24:MI:SS');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/trim.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/trim.sql b/hplsql/src/test/queries/local/trim.sql
new file mode 100644
index 0000000..f8a2978
--- /dev/null
+++ b/hplsql/src/test/queries/local/trim.sql
@@ -0,0 +1 @@
+'#' || TRIM(' Hello ') || '#';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/twopipes.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/twopipes.sql b/hplsql/src/test/queries/local/twopipes.sql
new file mode 100644
index 0000000..c1d6f1d
--- /dev/null
+++ b/hplsql/src/test/queries/local/twopipes.sql
@@ -0,0 +1 @@
+'a' || 'b' || 'c'
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/upper.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/upper.sql b/hplsql/src/test/queries/local/upper.sql
new file mode 100644
index 0000000..9b3b522
--- /dev/null
+++ b/hplsql/src/test/queries/local/upper.sql
@@ -0,0 +1 @@
+UPPER('abc');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/values_into.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/values_into.sql b/hplsql/src/test/queries/local/values_into.sql
new file mode 100644
index 0000000..e49894a
--- /dev/null
+++ b/hplsql/src/test/queries/local/values_into.sql
@@ -0,0 +1,6 @@
+VALUES 'A' INTO code;
+VALUES (0, 100) INTO (limit, count); 
+
+PRINT code;
+PRINT count;
+PRINT limit;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/while.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/while.sql b/hplsql/src/test/queries/local/while.sql
new file mode 100644
index 0000000..2dc4b54
--- /dev/null
+++ b/hplsql/src/test/queries/local/while.sql
@@ -0,0 +1,20 @@
+DECLARE count INT DEFAULT 7;
+
+WHILE count <> 0 LOOP
+  PRINT count;
+  count := count - 1;
+END LOOP;
+
+SET count = 7;
+
+WHILE count <> 0 DO
+  PRINT count;
+  SET count = count - 1;
+END WHILE;
+
+SET count = 7;
+
+WHILE count <> 0 BEGIN
+  PRINT count;
+  SET count = count - 1;
+END
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/add.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/add.out.txt b/hplsql/src/test/results/local/add.out.txt
new file mode 100644
index 0000000..37a195b
--- /dev/null
+++ b/hplsql/src/test/results/local/add.out.txt
@@ -0,0 +1,2 @@
+2015-01-01
+2015-01-01

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/assign.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/assign.out.txt b/hplsql/src/test/results/local/assign.out.txt
new file mode 100644
index 0000000..c01e270
--- /dev/null
+++ b/hplsql/src/test/results/local/assign.out.txt
@@ -0,0 +1,8 @@
+Ln:1 SET code = 'A'
+Ln:2 SET status = 1
+Ln:3 SET count = 0
+Ln:5 SET code = 'A'
+Ln:6 SET status = 1
+Ln:6 SET count = 0
+Ln:7 SET count = 0
+Ln:7 SET limit = 100

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/bool_expr.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/bool_expr.out.txt b/hplsql/src/test/results/local/bool_expr.out.txt
new file mode 100644
index 0000000..514f324
--- /dev/null
+++ b/hplsql/src/test/results/local/bool_expr.out.txt
@@ -0,0 +1,32 @@
+Ln:1 IF
+Ln:1 IF TRUE executed
+Ln:2 PRINT
+Correct
+Ln:7 IF
+Ln:7 IF TRUE executed
+Ln:8 PRINT
+Correct
+Ln:13 IF
+Ln:13 IF TRUE executed
+Ln:14 PRINT
+Correct
+Ln:19 IF
+Ln:19 ELSE executed
+Ln:22 PRINT
+Correct
+Ln:25 IF
+Ln:25 IF TRUE executed
+Ln:26 PRINT
+Correct
+Ln:31 IF
+Ln:31 IF TRUE executed
+Ln:32 PRINT
+Correct
+Ln:37 IF
+Ln:37 IF TRUE executed
+Ln:38 PRINT
+Correct
+Ln:43 IF
+Ln:43 ELSE executed
+Ln:46 PRINT
+Correct
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/break.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/break.out.txt b/hplsql/src/test/results/local/break.out.txt
new file mode 100644
index 0000000..cf907df
--- /dev/null
+++ b/hplsql/src/test/results/local/break.out.txt
@@ -0,0 +1,29 @@
+Ln:1 DECLARE count INT = 3
+Ln:2 WHILE - ENTERED
+Ln:3 PRINT
+Start of while block
+Ln:4 PRINT
+3
+Ln:5 SET count = 2
+Ln:6 IF
+Ln:8 PRINT
+End of while block
+Ln:3 PRINT
+Start of while block
+Ln:4 PRINT
+2
+Ln:5 SET count = 1
+Ln:6 IF
+Ln:8 PRINT
+End of while block
+Ln:3 PRINT
+Start of while block
+Ln:4 PRINT
+1
+Ln:5 SET count = 0
+Ln:6 IF
+Ln:6 IF TRUE executed
+Ln:7 BREAK
+Ln:2 WHILE - LEFT
+Ln:10 PRINT
+End of script
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/case.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/case.out.txt b/hplsql/src/test/results/local/case.out.txt
new file mode 100644
index 0000000..6062a1f
--- /dev/null
+++ b/hplsql/src/test/results/local/case.out.txt
@@ -0,0 +1,12 @@
+Ln:1 PRINT
+Correct
+Ln:8 PRINT
+Correct
+Ln:14 PRINT
+Correct
+Ln:19 PRINT
+Correct
+Ln:26 PRINT
+Correct
+Ln:32 PRINT
+Correct

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/cast.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/cast.out.txt b/hplsql/src/test/results/local/cast.out.txt
new file mode 100644
index 0000000..f3de493
--- /dev/null
+++ b/hplsql/src/test/results/local/cast.out.txt
@@ -0,0 +1,8 @@
+Ln:1 FUNC CAST
+A
+Ln:2 FUNC CAST
+Ab
+Ln:3 FUNC CAST
+Abc
+Ln:4 FUNC CAST
+2015-03-12

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/char.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/char.out.txt b/hplsql/src/test/results/local/char.out.txt
new file mode 100644
index 0000000..83b33d2
--- /dev/null
+++ b/hplsql/src/test/results/local/char.out.txt
@@ -0,0 +1 @@
+1000

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/coalesce.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/coalesce.out.txt b/hplsql/src/test/results/local/coalesce.out.txt
new file mode 100644
index 0000000..a111c85
--- /dev/null
+++ b/hplsql/src/test/results/local/coalesce.out.txt
@@ -0,0 +1,4 @@
+First non-null
+First non-null
+First non-null
+First non-null

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/concat.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/concat.out.txt b/hplsql/src/test/results/local/concat.out.txt
new file mode 100644
index 0000000..cdddd69
--- /dev/null
+++ b/hplsql/src/test/results/local/concat.out.txt
@@ -0,0 +1,2 @@
+abc
+NULL Value

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/create_function.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/create_function.out.txt b/hplsql/src/test/results/local/create_function.out.txt
new file mode 100644
index 0000000..b996ab4
--- /dev/null
+++ b/hplsql/src/test/results/local/create_function.out.txt
@@ -0,0 +1,9 @@
+Ln:1 CREATE FUNCTION hello
+Ln:10 PRINT
+Ln:10 EXEC FUNCTION hello
+Ln:4 PRINT
+Start
+Ln:5 RETURN
+Hello, world!
+Ln:11 PRINT
+End of script

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/create_function2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/create_function2.out.txt b/hplsql/src/test/results/local/create_function2.out.txt
new file mode 100644
index 0000000..c8fc993
--- /dev/null
+++ b/hplsql/src/test/results/local/create_function2.out.txt
@@ -0,0 +1,10 @@
+Ln:1 CREATE FUNCTION hello2
+Ln:10 PRINT
+Ln:10 EXEC FUNCTION hello2
+Ln:10 SET PARAM text = world
+Ln:4 PRINT
+Start
+Ln:5 RETURN
+Hello, world!
+Ln:11 PRINT
+End of script

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/create_procedure.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/create_procedure.out.txt b/hplsql/src/test/results/local/create_procedure.out.txt
new file mode 100644
index 0000000..1f86916
--- /dev/null
+++ b/hplsql/src/test/results/local/create_procedure.out.txt
@@ -0,0 +1,8 @@
+Ln:1 CREATE PROCEDURE set_message
+Ln:6 DECLARE str STRING
+Ln:7 EXEC PROCEDURE set_message
+Ln:7 SET PARAM name = world
+Ln:7 SET PARAM result = null
+Ln:3 SET result = 'Hello, world!'
+Ln:8 PRINT
+Hello, world!
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/date.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/date.out.txt b/hplsql/src/test/results/local/date.out.txt
new file mode 100644
index 0000000..118bd29
--- /dev/null
+++ b/hplsql/src/test/results/local/date.out.txt
@@ -0,0 +1,4 @@
+2014-12-20
+2015-03-12
+2015-03-12
+2015-03-12

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/dbms_output.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/dbms_output.out.txt b/hplsql/src/test/results/local/dbms_output.out.txt
new file mode 100644
index 0000000..b6ed0e0
--- /dev/null
+++ b/hplsql/src/test/results/local/dbms_output.out.txt
@@ -0,0 +1,3 @@
+Ln:2 DECLARE str VARCHAR = 'Hello, world!'
+Hello, world!
+Hello, world!

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/declare.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/declare.out.txt b/hplsql/src/test/results/local/declare.out.txt
new file mode 100644
index 0000000..2b7794e
--- /dev/null
+++ b/hplsql/src/test/results/local/declare.out.txt
@@ -0,0 +1,13 @@
+Ln:2 DECLARE code CHAR
+Ln:3 DECLARE status INT = 1
+Ln:4 DECLARE count SMALLINT = NULL
+Ln:5 DECLARE limit INT = 100
+Ln:6 DECLARE f UTL_FILE.FILE_TYPE
+Ln:8 SET status = 2
+Ln:11 DECLARE code CHAR
+Ln:12 DECLARE status INT = 1
+Ln:12 DECLARE status2 INT = 1
+Ln:13 DECLARE count SMALLINT
+Ln:13 DECLARE limit INT = 100
+Ln:15 DECLARE dt DATE = 2015-05-13
+Ln:16 DECLARE ts TIMESTAMP = 2015-05-13 11:10:01

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/declare_condition.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/declare_condition.out.txt b/hplsql/src/test/results/local/declare_condition.out.txt
new file mode 100644
index 0000000..4633c8d
--- /dev/null
+++ b/hplsql/src/test/results/local/declare_condition.out.txt
@@ -0,0 +1,7 @@
+Ln:2 DECLARE HANDLER
+Ln:4 IF
+Ln:4 IF TRUE executed
+Ln:5 SIGNAL
+Ln:2 EXIT HANDLER
+Ln:3 PRINT
+Condition raised
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/declare_condition2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/declare_condition2.out.txt b/hplsql/src/test/results/local/declare_condition2.out.txt
new file mode 100644
index 0000000..67da39d
--- /dev/null
+++ b/hplsql/src/test/results/local/declare_condition2.out.txt
@@ -0,0 +1,12 @@
+Ln:2 DECLARE HANDLER
+Ln:4 DECLARE HANDLER
+Ln:6 IF
+Ln:6 IF TRUE executed
+Ln:7 SIGNAL
+Ln:4 CONTINUE HANDLER
+Ln:5 PRINT
+Condition raised
+Ln:9 PRINT
+Executed 1
+Ln:10 PRINT
+Executed 2
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/decode.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/decode.out.txt b/hplsql/src/test/results/local/decode.out.txt
new file mode 100644
index 0000000..39b01bc
--- /dev/null
+++ b/hplsql/src/test/results/local/decode.out.txt
@@ -0,0 +1,13 @@
+Ln:1 DECLARE var1 INT = 3
+Ln:2 PRINT
+C
+Ln:3 PRINT
+C
+Ln:5 SET var1 = 1
+Ln:6 PRINT
+A
+Ln:8 SET var1 = NULL
+Ln:9 PRINT
+C
+Ln:10 PRINT
+C

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/equal.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/equal.out.txt b/hplsql/src/test/results/local/equal.out.txt
new file mode 100644
index 0000000..9c02e38
--- /dev/null
+++ b/hplsql/src/test/results/local/equal.out.txt
@@ -0,0 +1,48 @@
+Ln:1 PRINT
+Case 1 = 1
+Ln:2 IF
+Ln:2 IF TRUE executed
+Ln:3 PRINT
+Equal - Correct
+Ln:8 PRINT
+Case 1 == 1
+Ln:9 IF
+Ln:9 IF TRUE executed
+Ln:10 PRINT
+Equal - Correct
+Ln:15 PRINT
+Case 1 <> 3
+Ln:16 IF
+Ln:16 IF TRUE executed
+Ln:17 PRINT
+Not equal - Correct
+Ln:22 PRINT
+Case 1 != 3
+Ln:23 IF
+Ln:23 IF TRUE executed
+Ln:24 PRINT
+Not equal - Correct
+Ln:29 PRINT
+Case 3 > 1
+Ln:30 IF
+Ln:30 IF TRUE executed
+Ln:31 PRINT
+Greater - Correct
+Ln:36 PRINT
+Case 1 < 3
+Ln:37 IF
+Ln:37 IF TRUE executed
+Ln:38 PRINT
+Less - Correct
+Ln:43 PRINT
+Case 3 >= 1
+Ln:44 IF
+Ln:44 IF TRUE executed
+Ln:45 PRINT
+Greater or equal - Correct
+Ln:50 PRINT
+Case 1 <= 3
+Ln:51 IF
+Ln:51 IF TRUE executed
+Ln:52 PRINT
+Less or equal - Correct
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/exception.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/exception.out.txt b/hplsql/src/test/results/local/exception.out.txt
new file mode 100644
index 0000000..5de7998
--- /dev/null
+++ b/hplsql/src/test/results/local/exception.out.txt
@@ -0,0 +1,13 @@
+Ln:2 PRINT
+Correct
+Ln:3 WHILE - ENTERED
+Ln:4 FETCH
+Ln:4 Cursor not found: cur
+Ln:3 WHILE - LEFT
+Ln:7 EXCEPTION HANDLER
+Ln:8 PRINT
+Correct
+Ln:9 PRINT
+Correct
+Ln:10 PRINT
+Correct - Exception raised
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/exception2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/exception2.out.txt b/hplsql/src/test/results/local/exception2.out.txt
new file mode 100644
index 0000000..f56a326
--- /dev/null
+++ b/hplsql/src/test/results/local/exception2.out.txt
@@ -0,0 +1,5 @@
+Ln:2 DECLARE v VARCHAR
+Ln:4 OPEN
+Ln:4 cur: SELECT c1 FROM t1
+Ln:7 EXCEPTION HANDLER
+Error

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/exit.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/exit.out.txt b/hplsql/src/test/results/local/exit.out.txt
new file mode 100644
index 0000000..0352275
--- /dev/null
+++ b/hplsql/src/test/results/local/exit.out.txt
@@ -0,0 +1,42 @@
+Ln:1 DECLARE count INT = 3
+Ln:3 WHILE - ENTERED
+Ln:4 PRINT
+Start of while block
+Ln:5 PRINT
+3
+Ln:6 SET count = 2
+Ln:7 EXIT
+Ln:8 PRINT
+End of while block
+Ln:4 PRINT
+Start of while block
+Ln:5 PRINT
+2
+Ln:6 SET count = 1
+Ln:7 EXIT
+Ln:8 PRINT
+End of while block
+Ln:4 PRINT
+Start of while block
+Ln:5 PRINT
+1
+Ln:6 SET count = 0
+Ln:7 EXIT
+Ln:3 WHILE - LEFT
+Ln:11 SET count = 3
+Ln:14 WHILE - ENTERED
+Ln:15 PRINT
+Start of outer while block
+Ln:17 WHILE - ENTERED
+Ln:18 PRINT
+Start of 1st inner while block
+Ln:19 EXIT
+Ln:17 WHILE - LEFT
+Ln:24 WHILE - ENTERED
+Ln:25 PRINT
+Start of 2nd inner while block
+Ln:26 EXIT
+Ln:24 WHILE - LEFT
+Ln:14 WHILE - LEFT
+Ln:31 PRINT
+End of script
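
The exit.out.txt expectations above capture two behaviours of EXIT: a conditional EXIT that only leaves the WHILE once count reaches 0 (the "End of while block" line stops appearing on the last pass), and EXITs inside nested loops that terminate one nesting level at a time. Java's break and labelled break give a rough analogue (a minimal sketch of the control flow only; class and label names are illustrative, this is not HPL/SQL runtime code):

    public class ExitSketch {
        public static void main(String[] args) {
            int count = 3;
            while (count > 0) {
                System.out.println("Start of while block");
                System.out.println(count);
                count = count - 1;
                if (count == 0) {
                    break;                   // EXIT taken only when the condition holds
                }
                System.out.println("End of while block");
            }

            outer:
            while (true) {
                System.out.println("Start of outer while block");
                while (true) {
                    System.out.println("Start of 1st inner while block");
                    break;                   // EXIT leaves the inner loop only
                }
                break outer;                 // labelled EXIT leaves the outer loop
            }
            System.out.println("End of script");
        }
    }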

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/expr.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/expr.out.txt b/hplsql/src/test/results/local/expr.out.txt
new file mode 100644
index 0000000..a5377e3
--- /dev/null
+++ b/hplsql/src/test/results/local/expr.out.txt
@@ -0,0 +1,29 @@
+Ln:1 PRINT
+ab
+Ln:2 PRINT
+a1b
+Ln:3 PRINT
+1ab
+Ln:4 PRINT
+ab
+Ln:5 PRINT
+ab
+Ln:6 PRINT
+null
+Ln:8 DECLARE c INT
+Ln:10 PRINT
+Integer increment
+Ln:11 SET c = 3
+Ln:12 SET c = 4
+Ln:13 PRINT
+4
+Ln:15 PRINT
+Integer decrement
+Ln:16 SET c = 3
+Ln:17 SET c = 2
+Ln:18 PRINT
+2
+Ln:20 PRINT
+Correct
+Ln:21 PRINT
+Correct
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/for_range.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/for_range.out.txt b/hplsql/src/test/results/local/for_range.out.txt
new file mode 100644
index 0000000..dc29c95
--- /dev/null
+++ b/hplsql/src/test/results/local/for_range.out.txt
@@ -0,0 +1,65 @@
+Ln:1 DECLARE i INT = 3
+Ln:2 PRINT
+3
+Ln:4 FOR RANGE - ENTERED
+Ln:5 PRINT
+1
+Ln:5 PRINT
+2
+Ln:5 PRINT
+3
+Ln:5 PRINT
+4
+Ln:5 PRINT
+5
+Ln:5 PRINT
+6
+Ln:5 PRINT
+7
+Ln:5 PRINT
+8
+Ln:5 PRINT
+9
+Ln:5 PRINT
+10
+Ln:4 FOR RANGE - LEFT
+Ln:8 PRINT
+3
+Ln:10 FOR RANGE - ENTERED
+Ln:11 PRINT
+10
+Ln:11 PRINT
+9
+Ln:11 PRINT
+8
+Ln:11 PRINT
+7
+Ln:11 PRINT
+6
+Ln:11 PRINT
+5
+Ln:11 PRINT
+4
+Ln:11 PRINT
+3
+Ln:11 PRINT
+2
+Ln:11 PRINT
+1
+Ln:10 FOR RANGE - LEFT
+Ln:14 PRINT
+3
+Ln:16 FOR RANGE - ENTERED
+Ln:17 PRINT
+1
+Ln:17 PRINT
+3
+Ln:17 PRINT
+5
+Ln:17 PRINT
+7
+Ln:17 PRINT
+9
+Ln:16 FOR RANGE - LEFT
+Ln:20 PRINT
+3

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/if.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/if.out.txt b/hplsql/src/test/results/local/if.out.txt
new file mode 100644
index 0000000..1da8142
--- /dev/null
+++ b/hplsql/src/test/results/local/if.out.txt
@@ -0,0 +1,40 @@
+Ln:1 DECLARE state VARCHAR
+Ln:2 DECLARE count INT
+Ln:4 SET state = 'CA'
+Ln:5 SET count = 1
+Ln:11 IF
+Ln:11 IF TRUE executed
+Ln:12 PRINT
+True block - Correct
+Ln:17 IF
+Ln:17 ELSE executed
+Ln:20 PRINT
+False block - Correct
+Ln:23 IF
+Ln:23 ELSE executed
+Ln:30 PRINT
+False block - Correct
+Ln:33 IF
+Ln:33 ELSE IF executed
+Ln:38 PRINT
+True block - Correct
+Ln:43 PRINT
+IS NOT NULL AND BETWEEN
+Ln:44 IF
+Ln:44 IF TRUE executed
+Ln:45 PRINT
+True block - Correct
+Ln:50 PRINT
+Transact-SQL - Single statement
+Ln:52 IF
+Ln:52 IF TRUE executed
+Ln:53 PRINT
+True block - Correct
+Ln:57 PRINT
+Transact-SQL - BEGIN-END block
+Ln:59 IF
+Ln:59 IF TRUE executed
+Ln:61 PRINT
+True block - Correct
+Ln:62 PRINT
+True block - Correct
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/instr.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/instr.out.txt b/hplsql/src/test/results/local/instr.out.txt
new file mode 100644
index 0000000..a1ed71f
--- /dev/null
+++ b/hplsql/src/test/results/local/instr.out.txt
@@ -0,0 +1,33 @@
+Ln:1 IF
+Ln:1 IF TRUE executed
+Ln:2 PRINT
+Correct
+Ln:7 IF
+Ln:7 IF TRUE executed
+Ln:8 PRINT
+Correct
+Ln:13 IF
+Ln:13 IF TRUE executed
+Ln:14 PRINT
+Correct
+Ln:19 IF
+Ln:19 IF TRUE executed
+Ln:20 PRINT
+Correct
+Ln:25 IF
+Ln:25 IF TRUE executed
+Ln:26 PRINT
+Correct
+Ln:31 DECLARE c STRING
+Ln:33 IF
+Ln:33 IF TRUE executed
+Ln:34 PRINT
+Correct
+Ln:39 IF
+Ln:39 IF TRUE executed
+Ln:40 PRINT
+Correct
+Ln:45 IF
+Ln:45 IF TRUE executed
+Ln:46 PRINT
+Correct
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/interval.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/interval.out.txt b/hplsql/src/test/results/local/interval.out.txt
new file mode 100644
index 0000000..2dcdcd5
--- /dev/null
+++ b/hplsql/src/test/results/local/interval.out.txt
@@ -0,0 +1,11 @@
+2015-03-13
+2015-03-13 00:00:00
+2015-03-12 10:10:10.001
+2015-03-15
+2015-03-15 00:00:00
+2015-03-11
+2015-03-11 00:00:00
+2015-03-12 10:10:09.999
+2015-03-09
+2015-03-09 00:00:00
+2015-03-10 23:59:59

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/lang.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/lang.out.txt b/hplsql/src/test/results/local/lang.out.txt
new file mode 100644
index 0000000..0047ec4
--- /dev/null
+++ b/hplsql/src/test/results/local/lang.out.txt
@@ -0,0 +1,34 @@
+1
+1
+0
+-1
+1.0
+1.0
+-1.0
+Ln:19 DECLARE abc int
+Ln:20 DECLARE abc.abc int
+Ln:21 DECLARE abc . abc1 int
+Ln:22 DECLARE "abc" int
+Ln:23 DECLARE "abc".abc int
+Ln:24 DECLARE "abc"."abc" int
+Ln:25 DECLARE "abc" . "abc1" int
+Ln:26 DECLARE [abc] int
+Ln:27 DECLARE [abc].abc int
+Ln:28 DECLARE [abc].[abc] int
+Ln:29 DECLARE [abc] . [abc1] int
+Ln:30 DECLARE `abc` int
+Ln:31 DECLARE `abc`.abc int
+Ln:32 DECLARE `abc`.`abc` int
+Ln:33 DECLARE `abc` . `abc1` int
+Ln:34 DECLARE :new.abc int
+Ln:35 DECLARE @abc int
+Ln:36 DECLARE _abc int
+Ln:37 DECLARE #abc int
+Ln:38 DECLARE ##abc int
+Ln:39 DECLARE $abc int
+Ln:40 DECLARE abc_9 int
+2
+2
+0
+-2
+0

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/leave.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/leave.out.txt b/hplsql/src/test/results/local/leave.out.txt
new file mode 100644
index 0000000..8e57245
--- /dev/null
+++ b/hplsql/src/test/results/local/leave.out.txt
@@ -0,0 +1,42 @@
+Ln:1 DECLARE count INT = 3
+Ln:3 WHILE - ENTERED
+Ln:4 PRINT
+Start of while block
+Ln:5 PRINT
+3
+Ln:6 SET count = 2
+Ln:7 IF
+Ln:10 PRINT
+End of while block
+Ln:4 PRINT
+Start of while block
+Ln:5 PRINT
+2
+Ln:6 SET count = 1
+Ln:7 IF
+Ln:10 PRINT
+End of while block
+Ln:4 PRINT
+Start of while block
+Ln:5 PRINT
+1
+Ln:6 SET count = 0
+Ln:7 IF
+Ln:7 IF TRUE executed
+Ln:8 LEAVE
+Ln:3 WHILE - LEFT
+Ln:13 SET count = 3
+Ln:16 WHILE - ENTERED
+Ln:17 PRINT
+Start of outer while block
+Ln:20 WHILE - ENTERED
+Ln:21 PRINT
+Start of 1st inner while block
+Ln:22 LEAVE
+Ln:20 WHILE - LEFT
+Ln:27 WHILE - ENTERED
+Ln:28 PRINT
+Start of 2nd inner while block
+Ln:29 LEAVE
+Ln:27 WHILE - LEFT
+Ln:16 WHILE - LEFT
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/len.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/len.out.txt b/hplsql/src/test/results/local/len.out.txt
new file mode 100644
index 0000000..00750ed
--- /dev/null
+++ b/hplsql/src/test/results/local/len.out.txt
@@ -0,0 +1 @@
+3

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/length.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/length.out.txt b/hplsql/src/test/results/local/length.out.txt
new file mode 100644
index 0000000..b8626c4
--- /dev/null
+++ b/hplsql/src/test/results/local/length.out.txt
@@ -0,0 +1 @@
+4

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/lower.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/lower.out.txt b/hplsql/src/test/results/local/lower.out.txt
new file mode 100644
index 0000000..8baef1b
--- /dev/null
+++ b/hplsql/src/test/results/local/lower.out.txt
@@ -0,0 +1 @@
+abc

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/nvl.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/nvl.out.txt b/hplsql/src/test/results/local/nvl.out.txt
new file mode 100644
index 0000000..a111c85
--- /dev/null
+++ b/hplsql/src/test/results/local/nvl.out.txt
@@ -0,0 +1,4 @@
+First non-null
+First non-null
+First non-null
+First non-null

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/nvl2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/nvl2.out.txt b/hplsql/src/test/results/local/nvl2.out.txt
new file mode 100644
index 0000000..09acc48
--- /dev/null
+++ b/hplsql/src/test/results/local/nvl2.out.txt
@@ -0,0 +1,2 @@
+Correct
+Correct

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/plhqlexception.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/plhqlexception.out.txt b/hplsql/src/test/results/local/plhqlexception.out.txt
new file mode 100644
index 0000000..439cbbb
--- /dev/null
+++ b/hplsql/src/test/results/local/plhqlexception.out.txt
@@ -0,0 +1,6 @@
+Ln:1 PRINT
+Correct
+Ln:2 WHILE - ENTERED
+Ln:3 FETCH
+Ln:3 Cursor not found: cur
+Ln:2 WHILE - LEFT
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/plhqlexception1.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/plhqlexception1.out.txt b/hplsql/src/test/results/local/plhqlexception1.out.txt
new file mode 100644
index 0000000..9b88f62
--- /dev/null
+++ b/hplsql/src/test/results/local/plhqlexception1.out.txt
@@ -0,0 +1,10 @@
+Ln:1 PRINT
+Correct
+Ln:2 DECLARE HANDLER
+Ln:4 WHILE - ENTERED
+Ln:5 FETCH
+Ln:5 Cursor not found: cur
+Ln:4 WHILE - LEFT
+Ln:2 EXIT HANDLER
+Ln:3 PRINT
+Correct - Exception raised

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/plhqlexception2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/plhqlexception2.out.txt b/hplsql/src/test/results/local/plhqlexception2.out.txt
new file mode 100644
index 0000000..74de5b2
--- /dev/null
+++ b/hplsql/src/test/results/local/plhqlexception2.out.txt
@@ -0,0 +1,106 @@
+Ln:1 DECLARE cnt INT = 0
+Ln:2 PRINT
+Correct
+Ln:3 DECLARE HANDLER
+Ln:5 WHILE - ENTERED
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+0
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 1
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+1
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 2
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+2
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 3
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+3
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 4
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+4
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 5
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+5
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 6
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+6
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 7
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+7
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 8
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+8
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 9
+Ln:6 FETCH
+Ln:6 Cursor not found: cur
+Ln:3 CONTINUE HANDLER
+Ln:4 PRINT
+Correct - Exception raised
+Ln:7 PRINT
+9
+Ln:8 PRINT
+Correct - exception handled
+Ln:9 SET cnt = 10
+Ln:5 WHILE - LEFT
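
The plhqlexception2 expectations above pin down CONTINUE handler semantics: every FETCH against the undeclared cursor raises a condition, the handler on line 3 runs, and execution resumes with the statement after the failing one, so cnt keeps advancing until the WHILE condition is exhausted. A minimal Java analogue of that control flow (illustrative names only, not HPL/SQL runtime code):

    import java.util.NoSuchElementException;

    public class ContinueHandlerSketch {
        public static void main(String[] args) {
            int cnt = 0;
            while (cnt < 10) {
                try {
                    fetchFromMissingCursor();                          // fails every pass, like FETCH cur
                } catch (NoSuchElementException e) {
                    System.out.println("Correct - Exception raised");  // CONTINUE handler body
                }
                System.out.println(cnt);                               // resumes after the handler
                System.out.println("Correct - exception handled");
                cnt = cnt + 1;
            }
        }

        // Hypothetical stand-in for a fetch against a cursor that was never opened.
        static void fetchFromMissingCursor() {
            throw new NoSuchElementException("Cursor not found: cur");
        }
    }

An EXIT handler, by contrast, would leave the loop on the first failure, which is exactly the difference between this file and plhqlexception1.out.txt above.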

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/print.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/print.out.txt b/hplsql/src/test/results/local/print.out.txt
new file mode 100644
index 0000000..65a1016
--- /dev/null
+++ b/hplsql/src/test/results/local/print.out.txt
@@ -0,0 +1,6 @@
+Ln:1 PRINT
+1
+Ln:2 PRINT
+abc
+Ln:3 PRINT
+abc
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/return.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/return.out.txt b/hplsql/src/test/results/local/return.out.txt
new file mode 100644
index 0000000..f4d5bc9
--- /dev/null
+++ b/hplsql/src/test/results/local/return.out.txt
@@ -0,0 +1,3 @@
+Ln:1 PRINT
+Before return
+Ln:2 RETURN
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/select_conversion.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/select_conversion.out.txt b/hplsql/src/test/results/local/select_conversion.out.txt
new file mode 100644
index 0000000..602304e
--- /dev/null
+++ b/hplsql/src/test/results/local/select_conversion.out.txt
@@ -0,0 +1,9 @@
+Ln:1 DECLARE v1 STRING = abc
+Ln:3 SELECT
+Ln:3 Statement:
+SELECT CONCAT('a', 'b', 'c'), CONCAT('a', 'b') FROM default.dual
+Ln:3 Not executed - offline mode set
+Ln:5 SELECT
+Ln:5 Statement:
+SELECT 'abc' AS c1, CONCAT('abc', 'abc'), NVL(NVL(CONCAT('abc', NVL(id, 1), id), 1), 1), 'abc', 'abc' AS c4 FROM default.dual t1 WHERE 'abc' = 'abc' AND (NVL(NVL('abc', 1), 1) = 'abc' or 'abc' = 'abc')
+Ln:5 Not executed - offline mode set
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/seterror.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/seterror.out.txt b/hplsql/src/test/results/local/seterror.out.txt
new file mode 100644
index 0000000..3c093cc
--- /dev/null
+++ b/hplsql/src/test/results/local/seterror.out.txt
@@ -0,0 +1,6 @@
+Ln:2 SET plhql.onerror = NULL
+Ln:4 HOST
+Ln:4 HOST Command: abcd
+Ln:8 EXCEPTION HANDLER
+Ln:9 PRINT
+FAILED
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/sub.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/sub.out.txt b/hplsql/src/test/results/local/sub.out.txt
new file mode 100644
index 0000000..c35d146
--- /dev/null
+++ b/hplsql/src/test/results/local/sub.out.txt
@@ -0,0 +1 @@
+2014-12-31
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/substr.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/substr.out.txt b/hplsql/src/test/results/local/substr.out.txt
new file mode 100644
index 0000000..09acc48
--- /dev/null
+++ b/hplsql/src/test/results/local/substr.out.txt
@@ -0,0 +1,2 @@
+Correct
+Correct

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/substring.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/substring.out.txt b/hplsql/src/test/results/local/substring.out.txt
new file mode 100644
index 0000000..820d65a
--- /dev/null
+++ b/hplsql/src/test/results/local/substring.out.txt
@@ -0,0 +1,8 @@
+Correct
+Correct
+Ln:4 FUNC SUBSTRING
+Correct
+Ln:5 FUNC SUBSTRING
+Correct
+
+null
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/timestamp.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/timestamp.out.txt b/hplsql/src/test/results/local/timestamp.out.txt
new file mode 100644
index 0000000..bb58d46
--- /dev/null
+++ b/hplsql/src/test/results/local/timestamp.out.txt
@@ -0,0 +1,4 @@
+2015-03-03 11:39:31.123
+2015-03-03 11:39:31.123
+2015-03-03 11:39:31
+2015-03-03 11:39:31.123
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/timestamp_iso.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/timestamp_iso.out.txt b/hplsql/src/test/results/local/timestamp_iso.out.txt
new file mode 100644
index 0000000..997df7f
--- /dev/null
+++ b/hplsql/src/test/results/local/timestamp_iso.out.txt
@@ -0,0 +1,2 @@
+2015-03-12 00:00:00
+2015-03-12 00:00:00

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/to_char.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/to_char.out.txt b/hplsql/src/test/results/local/to_char.out.txt
new file mode 100644
index 0000000..22e8cef
--- /dev/null
+++ b/hplsql/src/test/results/local/to_char.out.txt
@@ -0,0 +1 @@
+2015-04-02

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/to_timestamp.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/to_timestamp.out.txt b/hplsql/src/test/results/local/to_timestamp.out.txt
new file mode 100644
index 0000000..1ee7278
--- /dev/null
+++ b/hplsql/src/test/results/local/to_timestamp.out.txt
@@ -0,0 +1,4 @@
+2015-04-02 00:00:00
+2015-04-02 00:00:00
+2015-04-02 00:00:00
+2015-04-02 13:51:31

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/trim.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/trim.out.txt b/hplsql/src/test/results/local/trim.out.txt
new file mode 100644
index 0000000..bbf851d
--- /dev/null
+++ b/hplsql/src/test/results/local/trim.out.txt
@@ -0,0 +1 @@
+#Hello#

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/twopipes.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/twopipes.out.txt b/hplsql/src/test/results/local/twopipes.out.txt
new file mode 100644
index 0000000..f2ba8f8
--- /dev/null
+++ b/hplsql/src/test/results/local/twopipes.out.txt
@@ -0,0 +1 @@
+abc
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/upper.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/upper.out.txt b/hplsql/src/test/results/local/upper.out.txt
new file mode 100644
index 0000000..5da849b
--- /dev/null
+++ b/hplsql/src/test/results/local/upper.out.txt
@@ -0,0 +1 @@
+ABC

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/values_into.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/values_into.out.txt b/hplsql/src/test/results/local/values_into.out.txt
new file mode 100644
index 0000000..d698e88
--- /dev/null
+++ b/hplsql/src/test/results/local/values_into.out.txt
@@ -0,0 +1,11 @@
+Ln:1 VALUES statement
+Ln:1 SET code = A
+Ln:2 VALUES statement
+Ln:2 SET limit = 0
+Ln:2 SET count = 100
+Ln:4 PRINT
+A
+Ln:5 PRINT
+100
+Ln:6 PRINT
+0
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/while.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/while.out.txt b/hplsql/src/test/results/local/while.out.txt
new file mode 100644
index 0000000..4a3ca0e
--- /dev/null
+++ b/hplsql/src/test/results/local/while.out.txt
@@ -0,0 +1,72 @@
+Ln:1 DECLARE count INT = 7
+Ln:3 WHILE - ENTERED
+Ln:4 PRINT
+7
+Ln:5 SET count = 6
+Ln:4 PRINT
+6
+Ln:5 SET count = 5
+Ln:4 PRINT
+5
+Ln:5 SET count = 4
+Ln:4 PRINT
+4
+Ln:5 SET count = 3
+Ln:4 PRINT
+3
+Ln:5 SET count = 2
+Ln:4 PRINT
+2
+Ln:5 SET count = 1
+Ln:4 PRINT
+1
+Ln:5 SET count = 0
+Ln:3 WHILE - LEFT
+Ln:8 SET count = 7
+Ln:10 WHILE - ENTERED
+Ln:11 PRINT
+7
+Ln:12 SET count = 6
+Ln:11 PRINT
+6
+Ln:12 SET count = 5
+Ln:11 PRINT
+5
+Ln:12 SET count = 4
+Ln:11 PRINT
+4
+Ln:12 SET count = 3
+Ln:11 PRINT
+3
+Ln:12 SET count = 2
+Ln:11 PRINT
+2
+Ln:12 SET count = 1
+Ln:11 PRINT
+1
+Ln:12 SET count = 0
+Ln:10 WHILE - LEFT
+Ln:15 SET count = 7
+Ln:17 WHILE - ENTERED
+Ln:18 PRINT
+7
+Ln:19 SET count = 6
+Ln:18 PRINT
+6
+Ln:19 SET count = 5
+Ln:18 PRINT
+5
+Ln:19 SET count = 4
+Ln:18 PRINT
+4
+Ln:19 SET count = 3
+Ln:18 PRINT
+3
+Ln:19 SET count = 2
+Ln:18 PRINT
+2
+Ln:19 SET count = 1
+Ln:18 PRINT
+1
+Ln:19 SET count = 0
+Ln:17 WHILE - LEFT
\ No newline at end of file


[32/50] [abbrv] hive git commit: HIVE-11224: AggregateStatsCache triggers java.util.ConcurrentModificationException under some conditions (Pengcheng Xiong via Thejas Nair)

Posted by xu...@apache.org.
HIVE-11224: AggregateStatsCache triggers java.util.ConcurrentModificationException under some conditions (Pengcheng Xiong via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/90a2cf9e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/90a2cf9e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/90a2cf9e

Branch: refs/heads/beeline-cli
Commit: 90a2cf9e87b22d9f568701dc53c8f8ffbe520fdb
Parents: a65bcbd
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Tue Jul 14 10:46:30 2015 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Tue Jul 14 10:46:30 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/metastore/AggregateStatsCache.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/90a2cf9e/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
index 44106f5..65e2c65 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
@@ -241,7 +241,8 @@ public class AggregateStatsCache {
     // We'll count misses as we iterate
     int maxMisses = (int) maxVariance * numPartsRequested;
     for (String partName : partNames) {
-      for (Map.Entry<AggrColStats, MatchStats> entry : candidateMatchStats.entrySet()) {
+      for (Iterator<Map.Entry<AggrColStats, MatchStats>> iterator = candidateMatchStats.entrySet().iterator(); iterator.hasNext();) {
+        Map.Entry<AggrColStats, MatchStats> entry = iterator.next();
         AggrColStats candidate = entry.getKey();
         matchStats = entry.getValue();
         if (candidate.getBloomFilter().test(partName.getBytes())) {
@@ -252,7 +253,7 @@ public class AggregateStatsCache {
         // 2nd pass at removing invalid candidates
         // If misses so far exceed max tolerable misses
         if (matchStats.misses > maxMisses) {
-          candidateMatchStats.remove(candidate);
+          iterator.remove();
           continue;
         }
         // Check if this is the best match so far
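
The change above is the standard cure for java.util.ConcurrentModificationException: removing an entry through the map itself while a for-each loop walks its entry set invalidates the loop's hidden iterator, whereas Iterator.remove() performs the same structural change while keeping the iterator consistent. The pattern in isolation (a self-contained sketch with toy types, not the Hive classes):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class SafeRemovalSketch {
        public static void main(String[] args) {
            Map<String, Integer> misses = new HashMap<>();
            misses.put("a", 1);
            misses.put("b", 5);
            misses.put("c", 2);
            int maxMisses = 3;

            // misses.remove(key) inside a for-each over entrySet() would throw
            // ConcurrentModificationException on the next step of the iteration.
            for (Iterator<Map.Entry<String, Integer>> it = misses.entrySet().iterator(); it.hasNext();) {
                Map.Entry<String, Integer> entry = it.next();
                if (entry.getValue() > maxMisses) {
                    it.remove();   // safe structural modification during iteration
                }
            }
            System.out.println(misses);   // {a=1, c=2} (iteration order not guaranteed)
        }
    }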


[38/50] [abbrv] hive git commit: HIVE-11223: CBO (Calcite Return Path): MapJoin and SMBJoin conversion not triggered (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11223: CBO (Calcite Return Path): MapJoin and SMBJoin conversion not triggered (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4d984bde
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4d984bde
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4d984bde

Branch: refs/heads/beeline-cli
Commit: 4d984bded32b23fd1a6306dfdd6d9b458bdf400d
Parents: 8662d9d
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Jul 10 02:01:17 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed Jul 15 18:37:59 2015 +0100

----------------------------------------------------------------------
 .../calcite/translator/HiveOpConverter.java     |   4 +-
 .../translator/HiveOpConverterPostProc.java     |  34 +-
 .../queries/clientpositive/cbo_rp_auto_join0.q  |   1 +
 .../queries/clientpositive/cbo_rp_auto_join1.q  |   1 +
 .../test/queries/clientpositive/cbo_rp_join0.q  |   1 +
 .../clientpositive/cbo_rp_auto_join0.q.out      |  32 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      | 578 ++++++++++---------
 .../results/clientpositive/cbo_rp_join0.q.out   | 185 +++---
 8 files changed, 471 insertions(+), 365 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index c711406..c54a601 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -211,7 +211,7 @@ public class HiveOpConverter {
     Map<Integer, ColumnInfo> posToNonPartColInfo = ht.getNonPartColInfoMap();
     List<Integer> neededColIndxsFrmReloptHT = scanRel.getNeededColIndxsFrmReloptHT();
     List<String> scanColNames = scanRel.getRowType().getFieldNames();
-    String tableAlias = scanRel.getTableAlias();
+    String tableAlias = scanRel.getConcatQbIDAlias();
 
     String colName;
     ColumnInfo colInfo;
@@ -251,7 +251,7 @@ public class HiveOpConverter {
     // 2. Setup TableScan
     TableScanOperator ts = (TableScanOperator) OperatorFactory.get(tsd, new RowSchema(colInfos));
 
-    topOps.put(scanRel.getConcatQbIDAlias(), ts);
+    topOps.put(tableAlias, ts);
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Generated " + ts + " with row schema: [" + ts.getSchema() + "]");
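
The point of this two-line change is key consistency: the alias written into the TableScan's descriptor and the key under which the operator is registered in topOps must be the same string, otherwise a later pass that reads the alias off the operator and looks it up in the map misses, and conversions such as MapJoin/SMBJoin never trigger. A toy illustration of the failure mode (hypothetical names, not Hive's API):

    import java.util.HashMap;
    import java.util.Map;

    public class AliasKeySketch {
        static class ScanOp {
            final String alias;                      // alias carried on the operator
            ScanOp(String alias) { this.alias = alias; }
        }

        public static void main(String[] args) {
            Map<String, ScanOp> topOps = new HashMap<>();

            // Before: operator labelled with the bare alias, registered under the
            // qualified "qbId:alias" key -- lookups via the operator's alias miss.
            ScanOp ts = new ScanOp("a");
            topOps.put("subq1:a", ts);
            System.out.println(topOps.containsKey(ts.alias));     // false

            // After: one alias string used for both purposes.
            topOps.clear();
            ScanOp fixed = new ScanOp("subq1:a");
            topOps.put(fixed.alias, fixed);
            System.out.println(topOps.containsKey(fixed.alias));  // true
        }
    }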

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
index d861682..5080992 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
@@ -51,7 +51,7 @@ public class HiveOpConverterPostProc implements Transform {
   private static final Log LOG = LogFactory.getLog(HiveOpConverterPostProc.class);
 
   private ParseContext                                  pctx;
-  private Map<String, Operator<? extends OperatorDesc>> aliasToJoinOpInfo;
+  private Map<String, Operator<? extends OperatorDesc>> aliasToOpInfo;
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
@@ -66,11 +66,12 @@ public class HiveOpConverterPostProc implements Transform {
 
     // 1. Initialize aux data structures
     this.pctx = pctx;
-    this.aliasToJoinOpInfo = new HashMap<String, Operator<? extends OperatorDesc>>();
+    this.aliasToOpInfo = new HashMap<String, Operator<? extends OperatorDesc>>();
 
     // 2. Trigger transformation
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(new RuleRegExp("R1", JoinOperator.getOperatorName() + "%"), new JoinAnnotate());
+    opRules.put(new RuleRegExp("R2", TableScanOperator.getOperatorName() + "%"), new TableScanAnnotate());
 
     Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
     GraphWalker ogw = new ForwardWalker(disp);
@@ -109,7 +110,7 @@ public class HiveOpConverterPostProc implements Transform {
       }
       joinOp.getConf().setBaseSrc(baseSrc);
       joinOp.getConf().setRightAliases(rightAliases);
-      joinOp.getConf().setAliasToOpInfo(aliasToJoinOpInfo);
+      joinOp.getConf().setAliasToOpInfo(aliasToOpInfo);
 
       // 2. Use self alias
       Set<String> aliases = joinOp.getSchema().getTableNames();
@@ -119,7 +120,7 @@ public class HiveOpConverterPostProc implements Transform {
                 .size() + " aliases for " + joinOp.toString());
       }
       final String joinOpAlias = aliases.iterator().next();;
-      aliasToJoinOpInfo.put(joinOpAlias, joinOp);
+      aliasToOpInfo.put(joinOpAlias, joinOp);
 
       // 3. Populate other data structures
       pctx.getJoinOps().add(joinOp);
@@ -128,4 +129,29 @@ public class HiveOpConverterPostProc implements Transform {
     }
   }
 
+
+  private class TableScanAnnotate implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+      TableScanOperator tableScanOp = (TableScanOperator) nd;
+
+      // 1. Get alias from topOps
+      String opAlias = null;
+      for (Map.Entry<String, Operator<? extends OperatorDesc>> topOpEntry : pctx.getTopOps().entrySet()) {
+        if (topOpEntry.getValue() == tableScanOp) {
+          opAlias = topOpEntry.getKey();
+        }
+      }
+
+      assert opAlias != null;
+
+      // 2. Add alias to 1) aliasToOpInfo and 2) opToAlias
+      aliasToOpInfo.put(opAlias, tableScanOp);
+
+      return null;
+    }
+  }
+
 }
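
TableScanAnnotate recovers an operator's alias by reverse lookup: topOps maps alias -> operator, so the processor scans the entries for the one whose value is the TableScanOperator at hand (reference identity, ==) and records the pair in aliasToOpInfo. The same pattern in isolation (generic toy types; a minimal sketch, not the Hive classes):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ReverseLookupSketch {
        // Returns the key whose value is the given object, or null if absent.
        static <K, V> K keyOf(Map<K, V> map, V value) {
            K found = null;
            for (Map.Entry<K, V> entry : map.entrySet()) {
                if (entry.getValue() == value) {   // identity, as in the processor
                    found = entry.getKey();
                }
            }
            return found;
        }

        public static void main(String[] args) {
            Map<String, Object> topOps = new LinkedHashMap<>();
            Object scan = new Object();
            topOps.put("subq1:a", scan);

            String alias = keyOf(topOps, scan);
            assert alias != null;                  // mirrors the processor's assert (enable with -ea)
            System.out.println(alias);             // subq1:a
        }
    }

Identity comparison is the natural choice here: the processor wants the map entry for the exact operator instance it was handed, not one that merely compares equal.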

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/test/queries/clientpositive/cbo_rp_auto_join0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join0.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join0.q
index ad9ebd1..f8511b1 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join0.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join0.q
@@ -1,3 +1,4 @@
+set hive.cbo.returnpath.hiveop=true;
 set hive.stats.fetch.column.stats=true;
 set hive.auto.convert.join = true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index c1f4352..096ae10 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -1,3 +1,4 @@
+set hive.cbo.returnpath.hiveop=true;
 set hive.stats.fetch.column.stats=true;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/test/queries/clientpositive/cbo_rp_join0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_join0.q b/ql/src/test/queries/clientpositive/cbo_rp_join0.q
index acfff75..3e3a013 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_join0.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_join0.q
@@ -1,4 +1,5 @@
 set hive.cbo.enable=true;
+set hive.cbo.returnpath.hiveop=true;
 set hive.exec.check.crossproducts=false;
 
 set hive.stats.fetch.column.stats=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
index 6fdc935..d1bc6d4 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join0.q.out
@@ -113,13 +113,17 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col0 (type: bigint)
+            outputColumnNames: _col0
             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -242,13 +246,17 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col0 (type: bigint)
+            outputColumnNames: _col0
             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
index 79b4650..d52586f 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
@@ -95,39 +95,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -182,35 +186,38 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  outputColumnNames: _col0
-                  Group By Operator
-                    keys: _col0 (type: int)
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      key expressions: _col0 (type: int)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: int)
+                    0 key (type: int)
+                    1 key (type: int)
+                  outputColumnNames: key
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)
           mode: mergepartial
-          outputColumnNames: _col0
+          outputColumnNames: key
           Select Operator
             Group By Operator
               aggregations: count()
@@ -234,13 +241,16 @@ STAGE PLANS:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -322,38 +332,41 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: src2:subq2:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  outputColumnNames: _col0
-                  Group By Operator
-                    aggregations: count()
-                    keys: _col0 (type: int)
-                    mode: hash
-                    outputColumnNames: _col0, _col1
-                    Reduce Output Operator
-                      key expressions: _col0 (type: int)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: int)
-                      value expressions: _col1 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  outputColumnNames: key
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           keys: KEY._col0 (type: int)
           mode: mergepartial
-          outputColumnNames: _col0, _col1
+          outputColumnNames: key, $f1
           File Output Operator
             compressed: false
             table:
@@ -366,27 +379,27 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: int)
+              key expressions: key (type: int)
               sort order: +
-              Map-reduce partition columns: _col0 (type: int)
-              value expressions: _col1 (type: bigint)
+              Map-reduce partition columns: key (type: int)
+              value expressions: $f1 (type: bigint)
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: int)
+              key expressions: key (type: int)
               sort order: +
-              Map-reduce partition columns: _col0 (type: int)
-              value expressions: _col1 (type: bigint)
+              Map-reduce partition columns: key (type: int)
+              value expressions: $f1 (type: bigint)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
-          outputColumnNames: _col0, _col1, _col3
+            0 key (type: int)
+            1 key (type: int)
+          outputColumnNames: key, $f1, $f10
           Select Operator
-            expressions: _col0 (type: int), _col3 (type: bigint), _col1 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
+            expressions: key (type: int), $f10 (type: bigint), $f1 (type: bigint)
+            outputColumnNames: key, cnt1, cnt11
             File Output Operator
               compressed: false
               table:
@@ -398,38 +411,41 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: src1:subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  outputColumnNames: _col0
-                  Group By Operator
-                    aggregations: count()
-                    keys: _col0 (type: int)
-                    mode: hash
-                    outputColumnNames: _col0, _col1
-                    Reduce Output Operator
-                      key expressions: _col0 (type: int)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: int)
-                      value expressions: _col1 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  outputColumnNames: key
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           keys: KEY._col0 (type: int)
           mode: mergepartial
-          outputColumnNames: _col0, _col1
+          outputColumnNames: key, $f1
           File Output Operator
             compressed: false
             table:
@@ -514,39 +530,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 6) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -610,39 +630,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: b
+            alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -730,39 +754,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -840,39 +868,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 8) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -927,54 +959,56 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: (key + 1) (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-              Filter Operator
-                predicate: _col0 is not null (type: boolean)
-                Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (key + 1) is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
+                  key expressions: (key + 1) (type: int)
                   sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  Map-reduce partition columns: (key + 1) (type: int)
+                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
-            alias: a
+            alias: subq2:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: (key + 1) (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-              Filter Operator
-                predicate: _col0 is not null (type: boolean)
-                Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (key + 1) is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
+                  key expressions: (key + 1) (type: int)
                   sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  Map-reduce partition columns: (key + 1) (type: int)
+                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
-          Statistics: Num rows: 10 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-          Group By Operator
-            aggregations: count()
-            mode: hash
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            0 (key + 1) (type: int)
+            1 (key + 1) (type: int)
+          Statistics: Num rows: 5 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            Statistics: Num rows: 5 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            Group By Operator
+              aggregations: count()
+              mode: hash
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -988,15 +1022,19 @@ STAGE PLANS:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
+          outputColumnNames: $f0
           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1046,39 +1084,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 6) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1134,41 +1176,45 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: a
+            alias: subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (key < 6) (type: boolean)
               Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                        Inner Join 0 to 2
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                    2 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                    2 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1240,39 +1286,43 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: b
+            alias: a:subq2:subq1:a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  Group By Operator
-                    aggregations: count()
-                    mode: hash
-                    outputColumnNames: _col0
-                    Reduce Output Operator
-                      sort order: 
-                      value expressions: _col0 (type: bigint)
+                    0 key (type: int)
+                    1 key (type: int)
+                  Select Operator
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Reduce Output Operator
+                        sort order: 
+                        value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
-          outputColumnNames: _col0
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: $f0
+          Select Operator
+            expressions: $f0 (type: bigint)
+            outputColumnNames: $f0
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1335,18 +1385,18 @@ STAGE PLANS:
               Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int), value (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: key, value
                 Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  outputColumnNames: _col0, _col1, _col3
+                    0 key (type: int)
+                    1 key (type: int)
+                  outputColumnNames: key, value, value0
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
+                    expressions: key (type: int), value (type: string), value0 (type: string)
+                    outputColumnNames: key, val1, val2
                     File Output Operator
                       compressed: false
                       table:
@@ -1416,18 +1466,18 @@ STAGE PLANS:
               Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int), value (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: key, value
                 Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 _col0 (type: int)
-                    1 _col0 (type: int)
-                  outputColumnNames: _col0, _col1, _col3
+                    0 key (type: int)
+                    1 key (type: int)
+                  outputColumnNames: key, value, value0
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
+                    expressions: key (type: int), value (type: string), value0 (type: string)
+                    outputColumnNames: key, val1, val2
                     File Output Operator
                       compressed: false
                       table:

http://git-wip-us.apache.org/repos/asf/hive/blob/4d984bde/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_join0.q.out b/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
index 1894110..93fed08 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_join0.q.out
@@ -21,40 +21,46 @@ STAGE PLANS:
           TableScan
             alias: cbo_t1
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), c_int (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                value expressions: _col1 (type: int)
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), c_int (type: int)
+                outputColumnNames: key, c_int
+                Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: c_int (type: int)
           TableScan
-            alias: cbo_t2
+            alias: cbo_t2:cbo_t2
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), c_int (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                value expressions: _col1 (type: int)
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), c_int (type: int)
+                outputColumnNames: key, c_int
+                Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: c_int (type: int)
           TableScan
-            alias: cbo_t3
+            alias: cbo_t3:cbo_t3
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
@@ -62,18 +68,22 @@ STAGE PLANS:
                Inner Join 0 to 1
                Right Outer Join0 to 2
           keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-            2 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 400 Data size: 71200 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 400 Data size: 71200 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            0 key (type: string)
+            1 key (type: string)
+            2 key (type: string)
+          outputColumnNames: key, c_int, key0, c_int0
+          Statistics: Num rows: 324 Data size: 57672 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string), c_int (type: int), key0 (type: string), c_int0 (type: int)
+            outputColumnNames: key, c_int, p, q
+            Statistics: Num rows: 324 Data size: 57672 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 324 Data size: 57672 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -660,54 +670,63 @@ STAGE PLANS:
           TableScan
             alias: cbo_t1
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), c_int (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                value expressions: _col1 (type: int)
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), c_int (type: int)
+                outputColumnNames: key, c_int
+                Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: c_int (type: int)
           TableScan
-            alias: cbo_t2
+            alias: cbo_t2:cbo_t2
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), c_int (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                value expressions: _col1 (type: int)
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), c_int (type: int)
+                outputColumnNames: key, c_int
+                Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: c_int (type: int)
           TableScan
-            alias: cbo_t3
+            alias: cbo_t3:cbo_t3
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 20 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
-            alias: cbo_t1
+            alias: cbo_t4:cbo_t1
             Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), c_int (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                value expressions: _col1 (type: int)
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: key (type: string), c_int (type: int)
+                outputColumnNames: key, c_int
+                Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: c_int (type: int)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -715,19 +734,19 @@ STAGE PLANS:
                Right Outer Join0 to 2
                Left Outer Join0 to 3
           keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-            2 _col0 (type: string)
-            3 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col6
-          Statistics: Num rows: 2000 Data size: 534000 Basic stats: COMPLETE Column stats: COMPLETE
+            0 key (type: string)
+            1 key (type: string)
+            2 key (type: string)
+            3 key (type: string)
+          outputColumnNames: key, c_int, key0, c_int0, key1, c_int1
+          Statistics: Num rows: 1458 Data size: 389286 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: string), _col6 (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-            Statistics: Num rows: 2000 Data size: 534000 Basic stats: COMPLETE Column stats: COMPLETE
+            expressions: key (type: string), c_int (type: int), key0 (type: string), c_int0 (type: int), key1 (type: string), c_int1 (type: int)
+            outputColumnNames: key, c_int, p, q, x, b
+            Statistics: Num rows: 1458 Data size: 389286 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2000 Data size: 534000 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1458 Data size: 389286 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat


[39/50] [abbrv] hive git commit: HIVE-11211 : Reset the fields in JoinStatsRule in StatsRulesProcFactory (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)

Posted by xu...@apache.org.
HIVE-11211 : Reset the fields in JoinStatsRule in StatsRulesProcFactory (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/42326958
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/42326958
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/42326958

Branch: refs/heads/beeline-cli
Commit: 42326958148c2558be9c3d4dfe44c9e735704617
Parents: 4d984bd
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Wed Jul 15 13:15:34 2015 -0700
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Wed Jul 15 13:15:34 2015 -0700

----------------------------------------------------------------------
 .../stats/annotation/StatsRulesProcFactory.java | 42 ++++++++++----------
 1 file changed, 22 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
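The diff below replaces JoinStatsRule's mutable instance fields (pkfkInferred, newNumRows, parents, jop, numAttr) with locals and return values. Because a single rule object is reused for every join operator in a plan, state left over from one process() invocation could leak into the next. A minimal, self-contained sketch of the hazard and of the local-variable style the patch moves to — the class and method names here are illustrative assumptions, not the actual Hive types:

    // SharedRuleSketch.java -- illustrative only, not Hive code.
    public class SharedRuleSketch {

      // Before: the result is cached on the shared rule instance.
      static class StatefulRule {
        private long newNumRows = 0;     // survives between calls
        long process(long[] rowCounts) {
          if (rowCounts.length == 1) {
            newNumRows = rowCounts[0];   // inference succeeded
          }
          return newNumRows;             // may be a stale value from an earlier call
        }
      }

      // After: each call computes its own result; -1 means "nothing inferred",
      // mirroring the sentinel the patch introduces for inferredRowCount.
      static class StatelessRule {
        long process(long[] rowCounts) {
          long newNumRows = -1;
          if (rowCounts.length == 1) {
            newNumRows = rowCounts[0];
          }
          return newNumRows;
        }
      }

      public static void main(String[] args) {
        StatefulRule buggy = new StatefulRule();
        buggy.process(new long[] {42});                        // infers 42
        System.out.println(buggy.process(new long[] {1, 2}));  // stale: prints 42
        System.out.println(new StatelessRule().process(new long[] {1, 2})); // prints -1
      }
    }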


http://git-wip-us.apache.org/repos/asf/hive/blob/42326958/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 0982059..376d42c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -1013,17 +1013,14 @@ public class StatsRulesProcFactory {
    */
   public static class JoinStatsRule extends DefaultStatsRule implements NodeProcessor {
 
-    private boolean pkfkInferred = false;
-    private long newNumRows = 0;
-    private List<Operator<? extends OperatorDesc>> parents;
-    private CommonJoinOperator<? extends JoinDesc> jop;
-    private int numAttr = 1;
 
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
-      jop = (CommonJoinOperator<? extends JoinDesc>) nd;
-      parents = jop.getParentOperators();
+      long newNumRows = 0;
+      CommonJoinOperator<? extends JoinDesc> jop = (CommonJoinOperator<? extends JoinDesc>) nd;
+      List<Operator<? extends OperatorDesc>> parents = jop.getParentOperators();
+      int numAttr = 1;
       AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
       HiveConf conf = aspCtx.getConf();
       boolean allStatsAvail = true;
@@ -1062,7 +1059,7 @@ public class StatsRulesProcFactory {
           numAttr = keyExprs.size();
 
           // infer PK-FK relationship in single attribute join case
-          inferPKFKRelationship();
+          long inferredRowCount = inferPKFKRelationship(numAttr, parents, jop);
           // get the join keys from parent ReduceSink operators
           for (int pos = 0; pos < parents.size(); pos++) {
             ReduceSinkOperator parent = (ReduceSinkOperator) jop.getParentOperators().get(pos);
@@ -1149,7 +1146,7 @@ public class StatsRulesProcFactory {
 
           // update join statistics
           stats.setColumnStats(outColStats);
-          long newRowCount = pkfkInferred ? newNumRows : computeNewRowCount(rowCounts, denom);
+          long newRowCount = inferredRowCount != -1 ? inferredRowCount : computeNewRowCount(rowCounts, denom);
           updateStatsForJoinType(stats, newRowCount, jop, rowCountParents);
           jop.setStatistics(stats);
 
@@ -1180,7 +1177,7 @@ public class StatsRulesProcFactory {
           }
 
           long maxDataSize = parentSizes.get(maxRowIdx);
-          long newNumRows = StatsUtils.safeMult(StatsUtils.safeMult(maxRowCount, (numParents - 1)), joinFactor);
+          newNumRows = StatsUtils.safeMult(StatsUtils.safeMult(maxRowCount, (numParents - 1)), joinFactor);
           long newDataSize = StatsUtils.safeMult(StatsUtils.safeMult(maxDataSize, (numParents - 1)), joinFactor);
           Statistics wcStats = new Statistics();
           wcStats.setNumRows(newNumRows);
@@ -1195,15 +1192,17 @@ public class StatsRulesProcFactory {
       return null;
     }
 
-    private void inferPKFKRelationship() {
+    private long inferPKFKRelationship(int numAttr, List<Operator<? extends OperatorDesc>> parents,
+        CommonJoinOperator<? extends JoinDesc> jop) {
+      long newNumRows = -1;
       if (numAttr == 1) {
         // If numAttr is 1, this means we join on one single key column.
         Map<Integer, ColStatistics> parentsWithPK = getPrimaryKeyCandidates(parents);
 
         // We only allow one single PK.
         if (parentsWithPK.size() != 1) {
-          LOG.debug("STATS-" + jop.toString() + ": detects multiple PK parents.");
-          return;
+          LOG.debug("STATS-" + jop.toString() + ": detects none/multiple PK parents.");
+          return newNumRows;
         }
         Integer pkPos = parentsWithPK.keySet().iterator().next();
         ColStatistics csPK = parentsWithPK.values().iterator().next();
@@ -1215,7 +1214,7 @@ public class StatsRulesProcFactory {
         // csfKs.size() + 1 == parents.size() means we have a single PK and all
         // the rest ops are FKs.
         if (csFKs.size() + 1 == parents.size()) {
-          getSelectivity(parents, pkPos, csPK, csFKs);
+          newNumRows = getCardinality(parents, pkPos, csPK, csFKs, jop);
 
           // some debug information
           if (isDebugEnabled) {
@@ -1236,16 +1235,17 @@ public class StatsRulesProcFactory {
           }
         }
       }
+      return newNumRows;
     }
 
     /**
-     * Get selectivity of reduce sink operators.
+     * Get cardinality of reduce sink operators.
      * @param csPK - ColStatistics for a single primary key
      * @param csFKs - ColStatistics for multiple foreign keys
      */
-    private void getSelectivity(List<Operator<? extends OperatorDesc>> ops, Integer pkPos, ColStatistics csPK,
-        Map<Integer, ColStatistics> csFKs) {
-      this.pkfkInferred = true;
+    private long getCardinality(List<Operator<? extends OperatorDesc>> ops, Integer pkPos,
+        ColStatistics csPK, Map<Integer, ColStatistics> csFKs,
+        CommonJoinOperator<? extends JoinDesc> jop) {
       double pkfkSelectivity = Double.MAX_VALUE;
       int fkInd = -1;
       // 1. We iterate through all the operators that have candidate FKs and
@@ -1290,13 +1290,15 @@ public class StatsRulesProcFactory {
           distinctVals.add(csFK.getCountDistint());
         }
       }
+      long newNumRows;
       if (csFKs.size() == 1) {
         // there is only one FK
-        this.newNumRows = newrows;
+        newNumRows = newrows;
       } else {
         // there is more than one FK
-        this.newNumRows = this.computeNewRowCount(rowCounts, getDenominator(distinctVals));
+        newNumRows = this.computeNewRowCount(rowCounts, getDenominator(distinctVals));
       }
+      return newNumRows;
     }
 
     private float getSelectivitySimpleTree(Operator<? extends OperatorDesc> op) {
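For intuition about what getCardinality now returns: when exactly one join parent looks like a primary-key side and all the others look like foreign-key sides, the estimate scales the FK side's row count by the selectivity already applied on the PK side, instead of using the generic distinct-value denominator. A rough worked example (the numbers are illustrative assumptions, not taken from the patch): an FK side of 1,000 rows joining a PK side whose filters kept 200 of 1,000 rows (selectivity 0.2) yields an inferred join cardinality of about 1,000 x 0.2 = 200 rows. With more than one FK parent, the code falls back to computeNewRowCount over the per-parent row counts and the distinct-value denominator, as the last hunk above shows.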


[46/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
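For context: Protect Mode was the DDL-level guard set with statements like the following (a sketch of the pre-HIVE-11145 syntax as documented in the Hive language manual; table name taken from the tests below):

    ALTER TABLE analyze_srcpart ENABLE NO_DROP;    -- forbid dropping the table/partitions
    ALTER TABLE analyze_srcpart ENABLE OFFLINE;    -- block access to the table's data
    ALTER TABLE analyze_srcpart DISABLE NO_DROP;   -- corresponding resets
    ALTER TABLE analyze_srcpart DISABLE OFFLINE;

With the feature removed, DESCRIBE output no longer carries a 'Protect Mode:' line, which is what the .q.out diffs below update.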
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats7.q.out b/ql/src/test/results/beelinepositive/stats7.q.out
index 57870c5..e375cbf 100644
--- a/ql/src/test/results/beelinepositive/stats7.q.out
+++ b/ql/src/test/results/beelinepositive/stats7.q.out
@@ -61,7 +61,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats7.db/analyze_srcpart/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -100,7 +99,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats7.db/analyze_srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -139,7 +137,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats7.db/analyze_srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats8.q.out b/ql/src/test/results/beelinepositive/stats8.q.out
index 017045c..1593bba 100644
--- a/ql/src/test/results/beelinepositive/stats8.q.out
+++ b/ql/src/test/results/beelinepositive/stats8.q.out
@@ -59,7 +59,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -97,7 +96,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''
@@ -165,7 +163,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -230,7 +227,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -295,7 +291,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -361,7 +356,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -400,7 +394,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -439,7 +432,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=11',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -478,7 +470,6 @@ No rows selected
 'Table:              ','analyze_srcpart     ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=12',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '
@@ -516,7 +507,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats9.q.out b/ql/src/test/results/beelinepositive/stats9.q.out
index 8697d61..852d816 100644
--- a/ql/src/test/results/beelinepositive/stats9.q.out
+++ b/ql/src/test/results/beelinepositive/stats9.q.out
@@ -48,7 +48,6 @@ No rows selected
 'Owner:              ','!!{user.name}!!                ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Retention:          ','0                   ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats9.db/analyze_srcbucket',''
 'Table Type:         ','MANAGED_TABLE       ',''

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/stats_empty_partition.q.out b/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
index ab6839e..415cf90 100644
--- a/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
+++ b/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
@@ -33,7 +33,6 @@ No rows selected
 'Table:              ','tmptable            ',''
 'CreateTime:         ','!!TIMESTAMP!!',''
 'LastAccessTime:     ','UNKNOWN             ',''
-'Protect Mode:       ','None                ',''
 'Location:           ','!!{hive.metastore.warehouse.dir}!!/stats_empty_partition.db/tmptable/part=1',''
 'Partition Parameters:','',''
 '','numFiles            ','1                   '

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientnegative/alter_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_file_format.q.out b/ql/src/test/results/clientnegative/alter_file_format.q.out
index d0c470b..96f1bfb 100644
--- a/ql/src/test/results/clientnegative/alter_file_format.q.out
+++ b/ql/src/test/results/clientnegative/alter_file_format.q.out
@@ -20,7 +20,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out b/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
index 447dc3a..1cbfd75 100644
--- a/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
+++ b/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
@@ -48,7 +48,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out b/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
index 321ebe5..d03c249 100644
--- a/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
+++ b/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
@@ -65,8 +65,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_file_format.q.out b/ql/src/test/results/clientpositive/alter_file_format.q.out
index 4d6389a..c9e88f8 100644
--- a/ql/src/test/results/clientpositive/alter_file_format.q.out
+++ b/ql/src/test/results/clientpositive/alter_file_format.q.out
@@ -20,7 +20,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -59,7 +58,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -104,7 +102,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -149,7 +146,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -194,7 +190,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -239,7 +234,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -284,7 +278,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -349,8 +342,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 #### A masked pattern was here ####
 	 	 
@@ -394,8 +385,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -445,8 +434,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -496,8 +483,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -547,8 +532,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -598,8 +581,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
index 43cc4ef..cefe069 100644
--- a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
+++ b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
@@ -85,7 +85,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -137,7 +136,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -242,8 +240,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -293,8 +289,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -352,8 +346,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	src_orc_merge_test_part_stat	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
index d3bc389..b1dfd7c 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
@@ -34,7 +34,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -82,8 +81,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -145,8 +142,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -184,7 +179,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -231,7 +225,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -269,8 +262,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -316,7 +307,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -354,8 +344,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -406,7 +394,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -454,7 +441,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -502,7 +488,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -550,7 +535,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
index 185cf1c..e5f8e7f 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
@@ -32,7 +32,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -80,8 +79,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -129,7 +126,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -177,8 +173,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -226,7 +220,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -274,8 +267,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -323,7 +314,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -371,8 +361,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -420,7 +408,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -468,8 +455,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -517,7 +502,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -565,8 +549,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -614,7 +596,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -662,8 +643,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -711,7 +690,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -759,8 +737,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -808,7 +784,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -856,8 +831,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
index 86c12c7..f919f10 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
@@ -33,7 +33,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -81,8 +80,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -130,7 +127,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -178,8 +174,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -227,7 +221,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -275,8 +268,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -324,7 +315,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -372,8 +362,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -421,7 +409,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -469,8 +456,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -518,7 +503,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -566,8 +550,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -615,7 +597,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -663,8 +644,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -712,7 +691,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -760,8 +738,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -809,7 +785,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -857,8 +832,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
index 42a9796..4d0f841 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
@@ -35,7 +35,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -83,8 +82,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -146,8 +143,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	12                  
@@ -185,7 +180,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -232,7 +226,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -270,8 +263,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	12                  
@@ -317,7 +308,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -355,8 +345,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 #### A masked pattern was here ####
@@ -407,7 +395,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -455,7 +442,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -503,7 +489,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -551,7 +536,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
index eb08b6f..f5e8d1f 100644
--- a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
@@ -47,8 +47,6 @@ Partition Value:    	[abc]
 Database:           	default             	 
 Table:              	alter_table_partition_clusterby_sortby	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -102,8 +100,6 @@ Partition Value:    	[abc]
 Database:           	default             	 
 Table:              	alter_table_partition_clusterby_sortby	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -157,8 +153,6 @@ Partition Value:    	[abc]
 Database:           	default             	 
 Table:              	alter_table_partition_clusterby_sortby	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 #### A masked pattern was here ####
@@ -201,7 +195,6 @@ c                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_skewed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_skewed_table.q.out b/ql/src/test/results/clientpositive/alter_skewed_table.q.out
index 0e0c5b0..03904e6 100644
--- a/ql/src/test/results/clientpositive/alter_skewed_table.q.out
+++ b/ql/src/test/results/clientpositive/alter_skewed_table.q.out
@@ -20,7 +20,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -59,7 +58,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -120,7 +118,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	skew_test           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -159,7 +156,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	skew_test           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -214,7 +210,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	skew_test           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -255,7 +250,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	skew_test           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
index b03146b..40974e4 100644
--- a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
@@ -20,7 +20,6 @@ b                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -60,7 +59,6 @@ b                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_table_serde2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_serde2.q.out b/ql/src/test/results/clientpositive/alter_table_serde2.q.out
index dc1dae3..3b63e7d 100644
--- a/ql/src/test/results/clientpositive/alter_table_serde2.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_serde2.q.out
@@ -31,7 +31,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -79,8 +78,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -130,7 +127,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -179,8 +175,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tst1                	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_view_as_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_view_as_select.q.out b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
index ff2d860..c89c0dc 100644
--- a/ql/src/test/results/clientpositive/alter_view_as_select.q.out
+++ b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
@@ -30,7 +30,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	tv                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -72,7 +71,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	tv                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -123,7 +121,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	tv                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/authorization_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_index.q.out b/ql/src/test/results/clientpositive/authorization_index.q.out
index 540d11b..adc02ad 100644
--- a/ql/src/test/results/clientpositive/authorization_index.q.out
+++ b/ql/src/test/results/clientpositive/authorization_index.q.out
@@ -28,7 +28,6 @@ _offsets            	array<bigint>
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	INDEX_TABLE         	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket5.q.out b/ql/src/test/results/clientpositive/bucket5.q.out
index 0c8418d..2e2984b 100644
--- a/ql/src/test/results/clientpositive/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/bucket5.q.out
@@ -514,7 +514,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out b/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
index c5a253d..a5df511 100644
--- a/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
+++ b/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
@@ -31,7 +31,6 @@ col3                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -76,7 +75,6 @@ col3                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -126,7 +124,6 @@ col3                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -183,7 +180,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -233,7 +229,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -282,7 +277,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -332,7 +326,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out
index 27142c6..c93b134 100644
--- a/ql/src/test/results/clientpositive/create_like.q.out
+++ b/ql/src/test/results/clientpositive/create_like.q.out
@@ -20,7 +20,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -59,7 +58,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -106,7 +104,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -338,7 +335,6 @@ last_name           	string              	last name of actor playing role
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -387,7 +383,6 @@ last_name           	string              	last name of actor playing role
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -435,7 +430,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -475,7 +469,6 @@ col2                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -514,7 +507,6 @@ col2                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -563,7 +555,6 @@ col2                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like2.q.out b/ql/src/test/results/clientpositive/create_like2.q.out
index e5c6f9c..8b001a9 100644
--- a/ql/src/test/results/clientpositive/create_like2.q.out
+++ b/ql/src/test/results/clientpositive/create_like2.q.out
@@ -42,7 +42,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
index e151897..d7f9dd2 100644
--- a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
+++ b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
@@ -24,7 +24,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -68,7 +67,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -112,7 +110,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -157,7 +154,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -212,7 +208,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like_view.q.out b/ql/src/test/results/clientpositive/create_like_view.q.out
index 0978fcf..e2dc2c4 100644
--- a/ql/src/test/results/clientpositive/create_like_view.q.out
+++ b/ql/src/test/results/clientpositive/create_like_view.q.out
@@ -48,7 +48,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -105,7 +104,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -160,7 +158,6 @@ b                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -281,7 +278,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_or_replace_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_or_replace_view.q.out b/ql/src/test/results/clientpositive/create_or_replace_view.q.out
index aab78ba..dd5bf13 100644
--- a/ql/src/test/results/clientpositive/create_or_replace_view.q.out
+++ b/ql/src/test/results/clientpositive/create_or_replace_view.q.out
@@ -30,7 +30,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	vt                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -121,7 +120,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	vt                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -215,7 +213,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	vt                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -286,7 +283,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	vt                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -378,7 +374,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	vt                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_skewed_table1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_skewed_table1.q.out b/ql/src/test/results/clientpositive/create_skewed_table1.q.out
index e8d85a5..415bb77 100644
--- a/ql/src/test/results/clientpositive/create_skewed_table1.q.out
+++ b/ql/src/test/results/clientpositive/create_skewed_table1.q.out
@@ -36,7 +36,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -69,7 +68,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -103,7 +101,6 @@ col3                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
index 67e87b9..1038d01 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -250,7 +250,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -300,7 +299,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -347,7 +345,6 @@ valoo               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -396,7 +393,6 @@ valoo               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -761,7 +757,6 @@ c                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -842,7 +837,6 @@ m                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -921,7 +915,6 @@ m                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -990,7 +983,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -1067,7 +1059,6 @@ boom                	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -1140,7 +1131,6 @@ mycol               	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -1226,7 +1216,6 @@ key                 	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -1348,7 +1337,6 @@ v2                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -1465,7 +1453,6 @@ value_count         	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -1551,7 +1538,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view_partitioned.q.out b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
index ebf9a6b..caa2251 100644
--- a/ql/src/test/results/clientpositive/create_view_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
@@ -71,7 +71,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -279,7 +278,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -393,7 +391,6 @@ v                   	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_view_translate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view_translate.q.out b/ql/src/test/results/clientpositive/create_view_translate.q.out
index fd74058..886a01b 100644
--- a/ql/src/test/results/clientpositive/create_view_translate.q.out
+++ b/ql/src/test/results/clientpositive/create_view_translate.q.out
@@ -29,7 +29,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 
@@ -75,7 +74,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 Table Type:         	VIRTUAL_VIEW        	 
 Table Parameters:	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 6f17a74..24cabc5 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -146,7 +146,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -294,7 +293,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -442,7 +440,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -507,7 +504,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -656,7 +652,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas_colname.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 3ecfbf2..232d505 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -103,7 +103,6 @@ c2                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -261,7 +260,6 @@ rr                  	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -449,7 +447,6 @@ lead1               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -620,7 +617,6 @@ _c1                 	double
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -769,7 +765,6 @@ _c1                 	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -1205,7 +1200,6 @@ _c1                 	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -1343,7 +1337,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_hadoop20.q.out b/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
index b76028b..4af1e0a 100644
--- a/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
+++ b/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
@@ -139,7 +139,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -283,7 +282,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -428,7 +426,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -493,7 +490,6 @@ conb                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -638,7 +634,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
index 64d9cb3..554ae48 100644
--- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
+++ b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
@@ -138,7 +138,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	db1                 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/database_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/database_location.q.out b/ql/src/test/results/clientpositive/database_location.q.out
index 159a8e2..797177d 100644
--- a/ql/src/test/results/clientpositive/database_location.q.out
+++ b/ql/src/test/results/clientpositive/database_location.q.out
@@ -39,7 +39,6 @@ value               	int
 # Detailed Table Information	 	 
 Database:           	db1                 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -110,7 +109,6 @@ value               	int
 # Detailed Table Information	 	 
 Database:           	db2                 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/decimal_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_serde.q.out b/ql/src/test/results/clientpositive/decimal_serde.q.out
index d651799..0783d9a 100644
--- a/ql/src/test/results/clientpositive/decimal_serde.q.out
+++ b/ql/src/test/results/clientpositive/decimal_serde.q.out
@@ -110,7 +110,6 @@ value               	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -162,7 +161,6 @@ value               	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/default_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/default_file_format.q.out b/ql/src/test/results/clientpositive/default_file_format.q.out
index 1ffba08..3d5c20f 100644
--- a/ql/src/test/results/clientpositive/default_file_format.q.out
+++ b/ql/src/test/results/clientpositive/default_file_format.q.out
@@ -55,7 +55,6 @@ c                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -85,7 +84,6 @@ c                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -115,7 +113,6 @@ c                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -145,7 +142,6 @@ c                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -176,7 +172,6 @@ c                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_comment_indent.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/describe_comment_indent.q.out b/ql/src/test/results/clientpositive/describe_comment_indent.q.out
index 5ded495..3e0f45e 100644
--- a/ql/src/test/results/clientpositive/describe_comment_indent.q.out
+++ b/ql/src/test/results/clientpositive/describe_comment_indent.q.out
@@ -56,7 +56,6 @@ col3                	string              	col3
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out b/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
index 12f385d..b202e65 100644
--- a/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
+++ b/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
@@ -49,7 +49,6 @@ col3                	string              	わご_col3
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
index 18768d9..2c8b0b0 100644
--- a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
@@ -55,8 +55,6 @@ Partition Value:    	[val_86]
 Database:           	default             	 
 Table:              	view_partitioned    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 #### A masked pattern was here ####
 PREHOOK: query: DROP VIEW view_partitioned


[40/50] [abbrv] hive git commit: HIVE-11124. Move OrcRecordUpdater.getAcidEventFields to RecordReaderFactory.

Posted by xu...@apache.org.
HIVE-11124. Move OrcRecordUpdater.getAcidEventFields to RecordReaderFactory.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/240097b7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/240097b7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/240097b7

Branch: refs/heads/beeline-cli
Commit: 240097b78b70172e1cf9bc37876a566ddfb9e115
Parents: 4232695
Author: Owen O'Malley <om...@apache.org>
Authored: Thu Jun 25 22:02:47 2015 -0700
Committer: Owen O'Malley <om...@apache.org>
Committed: Wed Jul 15 14:41:42 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java    | 5 -----
 .../org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java | 7 ++++++-
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/240097b7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index e4651b8..2220b8e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -207,11 +207,6 @@ public class OrcRecordUpdater implements RecordUpdater {
     return new OrcStruct.OrcStructInspector(fields);
   }
 
-  public static List<String> getAcidEventFields() {
-    return Lists.newArrayList("operation", "originalTransaction", "bucket", "rowId",
-        "currentTransaction", "row");
-  }
-
   OrcRecordUpdater(Path path,
                    AcidOutputFormat.Options options) throws IOException {
     this.options = options;

http://git-wip-us.apache.org/repos/asf/hive/blob/240097b7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
index 8740ee6..23a9af4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
@@ -72,9 +72,14 @@ public class RecordReaderFactory {
     }
   }
 
+  static List<String> getAcidEventFields() {
+    return Lists.newArrayList("operation", "originalTransaction", "bucket",
+        "rowId", "currentTransaction", "row");
+  }
+
   private static boolean checkAcidSchema(List<OrcProto.Type> fileSchema) {
     if (fileSchema.get(0).getKind().equals(OrcProto.Type.Kind.STRUCT)) {
-      List<String> acidFields = OrcRecordUpdater.getAcidEventFields();
+      List<String> acidFields = getAcidEventFields();
       List<String> rootFields = fileSchema.get(0).getFieldNamesList();
       if (acidFields.equals(rootFields)) {
         return true;
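
For context, a minimal stand-alone sketch of the check this move centralizes. The field list and the exact-equality test are taken from the hunks above; the class and method names here are invented for illustration, and the real checkAcidSchema reads the field names out of the file's OrcProto root struct rather than taking a List<String> directly:

    import java.util.Arrays;
    import java.util.List;

    public class AcidSchemaCheck {
      // The six wrapper fields of an ACID event struct, as listed in the patch.
      static List<String> getAcidEventFields() {
        return Arrays.asList("operation", "originalTransaction", "bucket",
            "rowId", "currentTransaction", "row");
      }

      // checkAcidSchema's core test: the root struct's field names must match
      // the ACID event fields exactly, and in order.
      static boolean looksLikeAcidSchema(List<String> rootFieldNames) {
        return getAcidEventFields().equals(rootFieldNames);
      }

      public static void main(String[] args) {
        System.out.println(looksLikeAcidSchema(Arrays.asList("operation",
            "originalTransaction", "bucket", "rowId", "currentTransaction",
            "row")));                                                    // true
        System.out.println(looksLikeAcidSchema(Arrays.asList("key", "value"))); // false
      }
    }

A side effect of the move worth noting: the method also drops from public to package-private, so what is really a reader-side concern no longer leaks into OrcRecordUpdater's public surface.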


[10/50] [abbrv] hive git commit: HIVE-10895: ObjectStore does not close Query objects in some calls, causing a potential leak in some metastore db resources (Aihua Xu reviewed by Chaoyu Tang, Sergey Shelukhin, Vaibhav Gumashta)

Posted by xu...@apache.org.
HIVE-10895: ObjectStore does not close Query objects in some calls, causing a potential leak in some metastore db resources (Aihua Xu reviewed by Chaoyu Tang, Sergey Shelukhin, Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/08595ffa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/08595ffa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/08595ffa

Branch: refs/heads/beeline-cli
Commit: 08595ffa33d4985b43249be9b7c5a081cece2e6a
Parents: 68eab64
Author: Vaibhav Gumashta <vg...@apache.org>
Authored: Thu Jul 9 22:53:21 2015 +0530
Committer: Vaibhav Gumashta <vg...@apache.org>
Committed: Thu Jul 9 22:53:21 2015 +0530

----------------------------------------------------------------------
 .../hive/metastore/MetaStoreDirectSql.java      |   56 +-
 .../hadoop/hive/metastore/ObjectStore.java      | 1838 ++++++++++--------
 .../hive/metastore/tools/HiveMetaTool.java      |   23 +-
 .../hadoop/hive/metastore/TestObjectStore.java  |  230 +++
 4 files changed, 1330 insertions(+), 817 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/08595ffa/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 1c21c8b..5776ec6 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -178,25 +178,44 @@ class MetaStoreDirectSql {
 
   private boolean ensureDbInit() {
     Transaction tx = pm.currentTransaction();
+    Query dbQuery = null, tblColumnQuery = null, partColumnQuery = null;
     try {
       // Force the underlying db to initialize.
-      pm.newQuery(MDatabase.class, "name == ''").execute();
-      pm.newQuery(MTableColumnStatistics.class, "dbName == ''").execute();
-      pm.newQuery(MPartitionColumnStatistics.class, "dbName == ''").execute();
+      dbQuery = pm.newQuery(MDatabase.class, "name == ''");
+      dbQuery.execute();
+
+      tblColumnQuery = pm.newQuery(MTableColumnStatistics.class, "dbName == ''");
+      tblColumnQuery.execute();
+
+      partColumnQuery = pm.newQuery(MPartitionColumnStatistics.class, "dbName == ''");
+      partColumnQuery.execute();
+
       return true;
     } catch (Exception ex) {
       LOG.warn("Database initialization failed; direct SQL is disabled", ex);
       tx.rollback();
       return false;
+    } finally {
+      if (dbQuery != null) {
+        dbQuery.closeAll();
+      }
+      if (tblColumnQuery != null) {
+        tblColumnQuery.closeAll();
+      }
+      if (partColumnQuery != null) {
+        partColumnQuery.closeAll();
+      }
     }
   }
 
   private boolean runTestQuery() {
     Transaction tx = pm.currentTransaction();
+    Query query = null;
     // Run a self-test query. If it doesn't work, we will self-disable. What a PITA...
     String selfTestQuery = "select \"DB_ID\" from \"DBS\"";
     try {
-      pm.newQuery("javax.jdo.query.SQL", selfTestQuery).execute();
+      query = pm.newQuery("javax.jdo.query.SQL", selfTestQuery);
+      query.execute();
       tx.commit();
       return true;
     } catch (Exception ex) {
@@ -204,6 +223,11 @@ class MetaStoreDirectSql {
       tx.rollback();
       return false;
     }
+    finally {
+      if (query != null) {
+        query.closeAll();
+      }
+    }
   }
 
   public boolean isCompatibleDatastore() {
@@ -393,14 +417,21 @@ class MetaStoreDirectSql {
   }
 
   private boolean isViewTable(String dbName, String tblName) throws MetaException {
-    String queryText = "select \"TBL_TYPE\" from \"TBLS\"" +
-        " inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " +
-        " where \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ?";
-    Object[] params = new Object[] { tblName, dbName };
-    Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
-    query.setUnique(true);
-    Object result = executeWithArray(query, params, queryText);
-    return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString());
+    Query query = null;
+    try {
+      String queryText = "select \"TBL_TYPE\" from \"TBLS\"" +
+          " inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " +
+          " where \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ?";
+      Object[] params = new Object[] { tblName, dbName };
+      query = pm.newQuery("javax.jdo.query.SQL", queryText);
+      query.setUnique(true);
+      Object result = executeWithArray(query, params, queryText);
+      return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString());
+    } finally {
+      if (query != null) {
+        query.closeAll();
+      }
+    }
   }
 
   /**
@@ -1190,6 +1221,7 @@ class MetaStoreDirectSql {
         partsFound++;
       }
     }
+    query.closeAll();
     return partsFound;
   }
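
The shape of the fix, reduced to a stand-alone sketch (class and method names invented; the real methods are the ones in the hunks above): hold the Query in a local, consume its result inside the try, and release it in finally. Note that closeAll() invalidates the results, so this pattern only works when the result is fully consumed before the method returns; the QueryWrapper change in the HiveMetaTool hunk further below handles the case where a live Collection must outlive the callee.

    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    class QueryCloseSketch {
      // Returns true iff the SQL query yields a (single) non-null result,
      // releasing the query's datastore resources even on exceptions --
      // unlike the old pm.newQuery(...).execute() one-liner, which dropped
      // the Query handle on the floor.
      static boolean existsRow(PersistenceManager pm, String sql) {
        Query query = null;
        try {
          query = pm.newQuery("javax.jdo.query.SQL", sql);
          query.setUnique(true);            // expect one result object
          Object result = query.execute();
          return result != null;            // consumed before the finally runs
        } finally {
          if (query != null) {
            query.closeAll();               // releases all results of this query
          }
        }
      }
    }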
 


[08/50] [abbrv] hive git commit: HIVE-10895: ObjectStore does not close Query objects in some calls, causing a potential leak in some metastore db resources (Aihua Xu reviewed by Chaoyu Tang, Sergey Shelukhin, Vaibhav Gumashta)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/08595ffa/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
index d0ff329..411ac21 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
@@ -149,16 +149,21 @@ public class HiveMetaTool {
     initObjectStore(hiveConf);
 
     System.out.println("Executing query: " + query);
-    Collection<?> result = objStore.executeJDOQLSelect(query);
-    if (result != null) {
-      Iterator<?> iter = result.iterator();
-      while (iter.hasNext()) {
-        Object o = iter.next();
-        System.out.println(o.toString());
+    ObjectStore.QueryWrapper queryWrapper = new ObjectStore.QueryWrapper();
+    try {
+      Collection<?> result = objStore.executeJDOQLSelect(query, queryWrapper);
+      if (result != null) {
+        Iterator<?> iter = result.iterator();
+        while (iter.hasNext()) {
+          Object o = iter.next();
+          System.out.println(o.toString());
+        }
+      } else {
+        System.err.println("Encountered error during executeJDOQLSelect -" +
+          "commit of JDO transaction failed.");
       }
-    } else {
-      System.err.println("Encountered error during executeJDOQLSelect -" +
-        "commit of JDO transaction failed.");
+    } finally {
+      queryWrapper.close();
     }
   }
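
Where the live result Collection escapes to the caller, the callee cannot call closeAll() itself, so the patch threads an ObjectStore.QueryWrapper through the call instead. A hypothetical stand-alone equivalent of that ownership transfer (QueryHandle, selectInto and dump are invented names; only the try/finally shape mirrors the hunk above):

    import java.util.Collection;
    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    class QueryHandle {
      Query query;                  // set by the callee, closed by the caller

      void close() {
        if (query != null) {
          query.closeAll();         // invalidates the Collection, so close last
          query = null;
        }
      }
    }

    class QueryHandleDemo {
      // Callee: creates the Query, parks it in the handle, returns live results.
      static Collection<?> selectInto(PersistenceManager pm, String jdoql,
          QueryHandle handle) {
        handle.query = pm.newQuery(jdoql);
        return (Collection<?>) handle.query.execute();
      }

      // Caller: iterate first, close in finally -- the shape of the hunk above.
      static void dump(PersistenceManager pm, String jdoql) {
        QueryHandle handle = new QueryHandle();
        try {
          for (Object o : selectInto(pm, jdoql, handle)) {
            System.out.println(o);
          }
        } finally {
          handle.close();
        }
      }
    }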
 

http://git-wip-us.apache.org/repos/asf/hive/blob/08595ffa/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
new file mode 100644
index 0000000..a4f9f6c
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestObjectStore {
+  private ObjectStore objectStore = null;
+
+  private static final String DB1 = "testobjectstoredb1";
+  private static final String DB2 = "testobjectstoredb2";
+  private static final String TABLE1 = "testobjectstoretable1";
+  private static final String KEY1 = "testobjectstorekey1";
+  private static final String KEY2 = "testobjectstorekey2";
+  private static final String OWNER = "testobjectstoreowner";
+  private static final String USER1 = "testobjectstoreuser1";
+  private static final String ROLE1 = "testobjectstorerole1";
+  private static final String ROLE2 = "testobjectstorerole2";
+
+  public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
+    @Override
+    public String convertExprToFilter(byte[] expr) throws MetaException {
+      return null;
+    }
+
+    @Override
+    public boolean filterPartitionsByExpr(List<String> partColumnNames,
+        List<PrimitiveTypeInfo> partColumnTypeInfos, byte[] expr,
+        String defaultPartitionName, List<String> partitionNames)
+        throws MetaException {
+      return false;
+    }
+  }
+
+  @Before
+  public void setUp() {
+    HiveConf conf = new HiveConf();
+    conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName());
+
+    objectStore = new ObjectStore();
+    objectStore.setConf(conf);
+
+    Deadline.registerIfNot(100000);
+    try {
+      objectStore.dropDatabase(DB1);
+    } catch (Exception e) {
+    }
+    try {
+      objectStore.dropDatabase(DB2);
+    } catch (Exception e) {
+    }
+  }
+
+  @After
+  public void tearDown() {
+  }
+
+  /**
+   * Test database operations
+   */
+  @Test
+  public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSuchObjectException {
+    Database db1 = new Database(DB1, "description", "locationurl", null);
+    Database db2 = new Database(DB2, "description", "locationurl", null);
+    objectStore.createDatabase(db1);
+    objectStore.createDatabase(db2);
+
+    List<String> databases = objectStore.getAllDatabases();
+    Assert.assertEquals(2, databases.size());
+    Assert.assertEquals(DB1, databases.get(0));
+    Assert.assertEquals(DB2, databases.get(1));
+
+    objectStore.dropDatabase(DB1);
+    databases = objectStore.getAllDatabases();
+    Assert.assertEquals(1, databases.size());
+    Assert.assertEquals(DB2, databases.get(0));
+
+    objectStore.dropDatabase(DB2);
+  }
+
+  /**
+   * Test table operations
+   */
+  @Test
+  public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+    Database db1 = new Database(DB1, "description", "locationurl", null);
+    objectStore.createDatabase(db1);
+    StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
+    HashMap<String,String> params = new HashMap<String,String>();
+    params.put("EXTERNAL", "false");
+    Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE");
+    objectStore.createTable(tbl1);
+
+    List<String> tables = objectStore.getAllTables(DB1);
+    Assert.assertEquals(1, tables.size());
+    Assert.assertEquals(TABLE1, tables.get(0));
+
+    Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE");
+    objectStore.alterTable(DB1, TABLE1, newTbl1);
+    tables = objectStore.getTables(DB1, "new*");
+    Assert.assertEquals(1, tables.size());
+    Assert.assertEquals("new" + TABLE1, tables.get(0));
+
+    objectStore.dropTable(DB1, "new" + TABLE1);
+    tables = objectStore.getAllTables(DB1);
+    Assert.assertEquals(0, tables.size());
+
+    objectStore.dropDatabase(DB1);
+  }
+
+  /**
+   * Tests partition operations
+   */
+  @Test
+  public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+    Database db1 = new Database(DB1, "description", "locationurl", null);
+    objectStore.createDatabase(db1);
+    StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
+    HashMap<String,String> tableParams = new HashMap<String,String>();
+    tableParams.put("EXTERNAL", "false");
+    FieldSchema partitionKey1 = new FieldSchema("Country", "String", "");
+    FieldSchema partitionKey2 = new FieldSchema("State", "String", "");
+    Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE");
+    objectStore.createTable(tbl1);
+    HashMap<String, String> partitionParams = new HashMap<String, String>();
+    partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
+    List<String> value1 = Arrays.asList("US", "CA");
+    Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
+    objectStore.addPartition(part1);
+    List<String> value2 = Arrays.asList("US", "MA");
+    Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
+    objectStore.addPartition(part2);
+
+    Deadline.startTimer("getPartition");
+    List<Partition> partitions = objectStore.getPartitions(DB1, TABLE1, 10);
+    Assert.assertEquals(2, partitions.size());
+    Assert.assertEquals(111, partitions.get(0).getCreateTime());
+    Assert.assertEquals(222, partitions.get(1).getCreateTime());
+
+    objectStore.dropPartition(DB1, TABLE1, value1);
+    partitions = objectStore.getPartitions(DB1, TABLE1, 10);
+    Assert.assertEquals(1, partitions.size());
+    Assert.assertEquals(222, partitions.get(0).getCreateTime());
+
+    objectStore.dropPartition(DB1, TABLE1, value2);
+    objectStore.dropTable(DB1, TABLE1);
+    objectStore.dropDatabase(DB1);
+  }
+
+  /**
+   * Test master keys operation
+   */
+  @Test
+  public void testMasterKeyOps() throws MetaException, NoSuchObjectException {
+    int id1 = objectStore.addMasterKey(KEY1);
+    int id2 = objectStore.addMasterKey(KEY2);
+
+    String[] keys = objectStore.getMasterKeys();
+    Assert.assertEquals(2, keys.length);
+    Assert.assertEquals(KEY1, keys[0]);
+    Assert.assertEquals(KEY2, keys[1]);
+
+    objectStore.updateMasterKey(id1, "new" + KEY1);
+    objectStore.updateMasterKey(id2, "new" + KEY2);
+    keys = objectStore.getMasterKeys();
+    Assert.assertEquals(2, keys.length);
+    Assert.assertEquals("new" + KEY1, keys[0]);
+    Assert.assertEquals("new" + KEY2, keys[1]);
+
+    objectStore.removeMasterKey(id1);
+    keys = objectStore.getMasterKeys();
+    Assert.assertEquals(1, keys.length);
+    Assert.assertEquals("new" + KEY2, keys[0]);
+
+    objectStore.removeMasterKey(id2);
+  }
+
+  /**
+   * Test role operation
+   */
+  @Test
+  public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchObjectException {
+    objectStore.addRole(ROLE1, OWNER);
+    objectStore.addRole(ROLE2, OWNER);
+    List<String> roles = objectStore.listRoleNames();
+    Assert.assertEquals(2, roles.size());
+    Assert.assertEquals(ROLE2, roles.get(1));
+    Role role1 = objectStore.getRole(ROLE1);
+    Assert.assertEquals(OWNER, role1.getOwnerName());
+    objectStore.grantRole(role1, USER1, PrincipalType.USER, OWNER, PrincipalType.ROLE, true);
+    objectStore.revokeRole(role1, USER1, PrincipalType.USER, false);
+    objectStore.removeRole(ROLE1);
+  }
+}


[23/50] [abbrv] hive git commit: HIVE-11231 : CBO: Calcite Operator To Hive Operator (Calcite Return Path): make the output of ba_table_union.q more stable (Pengcheng Xiong via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11231 : CBO: Calcite Operator To Hive Operator (Calcite Return Path): make the output of ba_table_union.q more stable (Pengcheng Xiong via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/65e9fcf0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/65e9fcf0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/65e9fcf0

Branch: refs/heads/beeline-cli
Commit: 65e9fcf059f5e274c4b7871e7bc4034db98e8591
Parents: 66feedc
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Mon Jul 20 12:16:00 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Jul 13 09:32:29 2015 -0700

----------------------------------------------------------------------
 ql/src/test/queries/clientpositive/ba_table_union.q |  2 +-
 .../results/clientpositive/ba_table_union.q.out     | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/65e9fcf0/ql/src/test/queries/clientpositive/ba_table_union.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ba_table_union.q b/ql/src/test/queries/clientpositive/ba_table_union.q
index 9804659..bf35d0e 100644
--- a/ql/src/test/queries/clientpositive/ba_table_union.q
+++ b/ql/src/test/queries/clientpositive/ba_table_union.q
@@ -7,7 +7,7 @@ describe extended ba_test;
 
 from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary);
 
-select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10;
+select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test order by key limit 50) unioned order by key limit 10;
 
 drop table ba_test;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/65e9fcf0/ql/src/test/results/clientpositive/ba_table_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ba_table_union.q.out b/ql/src/test/results/clientpositive/ba_table_union.q.out
index 639ffda..53f16b6 100644
--- a/ql/src/test/results/clientpositive/ba_table_union.q.out
+++ b/ql/src/test/results/clientpositive/ba_table_union.q.out
@@ -32,12 +32,12 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@ba_test
 POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10
+PREHOOK: query: select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test order by key limit 50) unioned order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ba_test
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10
+POSTHOOK: query: select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test order by key limit 50) unioned order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@ba_test
 POSTHOOK: Input: default@src
@@ -45,13 +45,13 @@ POSTHOOK: Input: default@src
 0
 0
 0
+0
+0
+0
+10
 10
-11
-12
-12
-15
-15
-153
+100
+100
 PREHOOK: query: drop table ba_test
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@ba_test
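
Why the reordering stabilizes this test: a bare LIMIT keeps whichever rows happen to arrive first, which can vary from run to run, whereas ORDER BY ... LIMIT depends only on the data. The same sort-before-truncate idea in a stand-alone Java sketch (names invented; the keys compare lexically as strings, which is also why 100 sorts before 11 in the new expected output above):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class OrderThenLimit {
      // "LIMIT n" alone: result depends on arrival order.
      static List<String> take(List<String> rows, int n) {
        return rows.stream().limit(n).collect(Collectors.toList());
      }

      // "ORDER BY key LIMIT n": result depends only on the data.
      static List<String> sortedTake(List<String> rows, int n) {
        return rows.stream().sorted().limit(n).collect(Collectors.toList());
      }

      public static void main(String[] args) {
        List<String> run1 = Arrays.asList("15", "0", "11", "100", "10");
        List<String> run2 = Arrays.asList("100", "10", "0", "15", "11"); // same rows, shuffled

        System.out.println(take(run1, 3));       // [15, 0, 11]
        System.out.println(take(run2, 3));       // [100, 10, 0]  -- differs
        System.out.println(sortedTake(run1, 3)); // [0, 10, 100]
        System.out.println(sortedTake(run2, 3)); // [0, 10, 100]  -- stable
      }
    }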


[50/50] [abbrv] hive git commit: HIVE-11275: Merge branch 'master' into beeline-cli branch 07/14/2015

Posted by xu...@apache.org.
HIVE-11275: Merge branch 'master' into beeline-cli branch 07/14/2015


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e6adedc1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e6adedc1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e6adedc1

Branch: refs/heads/beeline-cli
Commit: e6adedc1ca83ee30492dc3c05b16aeb81e640aef
Parents: 0ac8f6c e61a1a9
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Wed Jul 15 21:33:21 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Wed Jul 15 21:33:21 2015 -0700

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 .../org/apache/hive/beeline/BeeLineOpts.java    |    4 +-
 .../java/org/apache/hive/beeline/Commands.java  |   23 +-
 bin/ext/hplsql.sh                               |   37 +
 bin/hplsql                                      |   25 +
 bin/hplsql.cmd                                  |   58 +
 .../apache/hadoop/hive/common/FileUtils.java    |    9 +-
 .../hadoop/hive/common/JvmPauseMonitor.java     |    7 +-
 .../hive/common/metrics/LegacyMetrics.java      |   30 +-
 .../hive/common/metrics/common/Metrics.java     |   27 +
 .../common/metrics/common/MetricsConstant.java  |   35 +
 .../common/metrics/common/MetricsVariable.java  |   26 +
 .../metrics/metrics2/CodahaleMetrics.java       |   58 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   12 +-
 .../metrics/metrics2/TestCodahaleMetrics.java   |   42 +
 data/files/encoding-utf8.txt                    |   12 +
 data/files/encoding_iso-8859-1.txt              |    4 +
 .../results/positive/external_table_ppd.q.out   |    1 -
 .../positive/hbase_binary_storage_queries.q.out |    2 -
 .../src/test/results/positive/hbase_stats.q.out |    7 -
 .../test/results/positive/hbase_stats2.q.out    |    7 -
 .../test/results/positive/hbase_stats3.q.out    |   12 -
 .../positive/hbase_stats_empty_partition.q.out  |    2 -
 .../SemanticAnalysis/HCatSemanticAnalyzer.java  |    7 +-
 .../src/test/e2e/templeton/deployers/env.sh     |    5 +-
 hcatalog/streaming/pom.xml                      |    6 +
 .../streaming/AbstractRecordWriter.java         |    4 +-
 .../streaming/mutate/HiveConfFactory.java       |   63 +
 .../mutate/UgiMetaStoreClientFactory.java       |  102 +
 .../streaming/mutate/client/AcidTable.java      |  112 +
 .../mutate/client/AcidTableSerializer.java      |  100 +
 .../mutate/client/ClientException.java          |   15 +
 .../mutate/client/ConnectionException.java      |   15 +
 .../streaming/mutate/client/MutatorClient.java  |  149 +
 .../mutate/client/MutatorClientBuilder.java     |  115 +
 .../streaming/mutate/client/TableType.java      |   37 +
 .../streaming/mutate/client/Transaction.java    |  114 +
 .../mutate/client/TransactionException.java     |   15 +
 .../mutate/client/lock/HeartbeatFactory.java    |   30 +
 .../mutate/client/lock/HeartbeatTimerTask.java  |   66 +
 .../streaming/mutate/client/lock/Lock.java      |  305 ++
 .../mutate/client/lock/LockException.java       |   15 +
 .../mutate/client/lock/LockFailureListener.java |   26 +
 .../mutate/doc-files/system-overview.dot        |   27 +
 .../hive/hcatalog/streaming/mutate/package.html |  495 +++
 .../mutate/worker/BucketIdException.java        |   11 +
 .../mutate/worker/BucketIdResolver.java         |   11 +
 .../mutate/worker/BucketIdResolverImpl.java     |   76 +
 .../mutate/worker/CreatePartitionHelper.java    |   83 +
 .../mutate/worker/GroupRevisitedException.java  |   11 +
 .../mutate/worker/GroupingValidator.java        |   74 +
 .../streaming/mutate/worker/Mutator.java        |   21 +
 .../mutate/worker/MutatorCoordinator.java       |  281 ++
 .../worker/MutatorCoordinatorBuilder.java       |   76 +
 .../streaming/mutate/worker/MutatorFactory.java |   16 +
 .../streaming/mutate/worker/MutatorImpl.java    |   86 +
 .../streaming/mutate/worker/OperationType.java  |    7 +
 .../worker/PartitionCreationException.java      |   15 +
 .../mutate/worker/RecordInspector.java          |   11 +
 .../mutate/worker/RecordInspectorImpl.java      |   45 +
 .../mutate/worker/RecordSequenceException.java  |   11 +
 .../mutate/worker/SequenceValidator.java        |   49 +
 .../mutate/worker/WorkerException.java          |   15 +
 .../streaming/mutate/ExampleUseCase.java        |   82 +
 .../streaming/mutate/MutableRecord.java         |   50 +
 .../mutate/ReflectiveMutatorFactory.java        |   51 +
 .../streaming/mutate/StreamingAssert.java       |  191 +
 .../streaming/mutate/StreamingTestUtils.java    |  261 ++
 .../streaming/mutate/TestMutations.java         |  544 +++
 .../mutate/client/TestAcidTableSerializer.java  |   66 +
 .../mutate/client/TestMutatorClient.java        |  176 +
 .../mutate/client/TestTransaction.java          |   95 +
 .../client/lock/TestHeartbeatTimerTask.java     |  100 +
 .../streaming/mutate/client/lock/TestLock.java  |  310 ++
 .../mutate/worker/TestBucketIdResolverImpl.java |   38 +
 .../mutate/worker/TestGroupingValidator.java    |   70 +
 .../mutate/worker/TestMutatorCoordinator.java   |  234 ++
 .../mutate/worker/TestMutatorImpl.java          |   99 +
 .../mutate/worker/TestRecordInspectorImpl.java  |   31 +
 .../mutate/worker/TestSequenceValidator.java    |   91 +
 .../hive/hcatalog/api/HCatClientHMSImpl.java    |   14 +-
 .../hive/hcatalog/templeton/AppConfig.java      |   28 +-
 .../hcatalog/templeton/SecureProxySupport.java  |    6 +-
 hplsql/pom.xml                                  |  128 +
 .../antlr4/org/apache/hive/hplsql/Hplsql.g4     | 1426 ++++++++
 .../java/org/apache/hive/hplsql/Arguments.java  |  206 ++
 .../main/java/org/apache/hive/hplsql/Conf.java  |  175 +
 .../main/java/org/apache/hive/hplsql/Conn.java  |  243 ++
 .../java/org/apache/hive/hplsql/Converter.java  |   56 +
 .../main/java/org/apache/hive/hplsql/Copy.java  |  426 +++
 .../main/java/org/apache/hive/hplsql/Exec.java  | 1959 +++++++++++
 .../java/org/apache/hive/hplsql/Expression.java |  574 +++
 .../main/java/org/apache/hive/hplsql/File.java  |  132 +
 .../java/org/apache/hive/hplsql/Handler.java    |   41 +
 .../java/org/apache/hive/hplsql/Hplsql.java     |   25 +
 .../java/org/apache/hive/hplsql/Interval.java   |  109 +
 .../main/java/org/apache/hive/hplsql/Query.java |  155 +
 .../main/java/org/apache/hive/hplsql/Scope.java |   69 +
 .../java/org/apache/hive/hplsql/Select.java     |  411 +++
 .../java/org/apache/hive/hplsql/Signal.java     |   48 +
 .../main/java/org/apache/hive/hplsql/Stmt.java  | 1021 ++++++
 .../org/apache/hive/hplsql/StreamGobbler.java   |   51 +
 .../main/java/org/apache/hive/hplsql/Timer.java |   59 +
 .../main/java/org/apache/hive/hplsql/Udf.java   |  117 +
 .../main/java/org/apache/hive/hplsql/Utils.java |  289 ++
 .../main/java/org/apache/hive/hplsql/Var.java   |  430 +++
 .../apache/hive/hplsql/functions/Function.java  |  709 ++++
 .../hive/hplsql/functions/FunctionDatetime.java |  151 +
 .../hive/hplsql/functions/FunctionMisc.java     |  188 +
 .../hive/hplsql/functions/FunctionOra.java      |  231 ++
 .../hive/hplsql/functions/FunctionString.java   |  276 ++
 .../org/apache/hive/hplsql/TestHplsqlLocal.java |  330 ++
 hplsql/src/test/queries/local/add.sql           |    2 +
 hplsql/src/test/queries/local/assign.sql        |    7 +
 hplsql/src/test/queries/local/bool_expr.sql     |   47 +
 hplsql/src/test/queries/local/break.sql         |   10 +
 hplsql/src/test/queries/local/case.sql          |   35 +
 hplsql/src/test/queries/local/cast.sql          |    4 +
 hplsql/src/test/queries/local/char.sql          |    1 +
 hplsql/src/test/queries/local/coalesce.sql      |    4 +
 hplsql/src/test/queries/local/concat.sql        |    2 +
 .../src/test/queries/local/create_function.sql  |   11 +
 .../src/test/queries/local/create_function2.sql |   11 +
 .../src/test/queries/local/create_procedure.sql |    9 +
 hplsql/src/test/queries/local/date.sql          |    5 +
 hplsql/src/test/queries/local/dbms_output.sql   |    6 +
 hplsql/src/test/queries/local/declare.sql       |   16 +
 .../test/queries/local/declare_condition.sql    |    8 +
 .../test/queries/local/declare_condition2.sql   |   10 +
 hplsql/src/test/queries/local/decode.sql        |   10 +
 hplsql/src/test/queries/local/equal.sql         |   55 +
 hplsql/src/test/queries/local/exception.sql     |   14 +
 hplsql/src/test/queries/local/exception2.sql    |   10 +
 hplsql/src/test/queries/local/exception3.sql    |    5 +
 hplsql/src/test/queries/local/exception4.sql    |    7 +
 hplsql/src/test/queries/local/exception5.sql    |   10 +
 hplsql/src/test/queries/local/exit.sql          |   31 +
 hplsql/src/test/queries/local/expr.sql          |   21 +
 hplsql/src/test/queries/local/for_range.sql     |   20 +
 hplsql/src/test/queries/local/if.sql            |   68 +
 hplsql/src/test/queries/local/instr.sql         |   49 +
 hplsql/src/test/queries/local/interval.sql      |   15 +
 hplsql/src/test/queries/local/lang.sql          |   57 +
 hplsql/src/test/queries/local/leave.sql         |   33 +
 hplsql/src/test/queries/local/len.sql           |    1 +
 hplsql/src/test/queries/local/length.sql        |    1 +
 hplsql/src/test/queries/local/lower.sql         |    1 +
 hplsql/src/test/queries/local/nvl.sql           |    4 +
 hplsql/src/test/queries/local/nvl2.sql          |    2 +
 hplsql/src/test/queries/local/print.sql         |    5 +
 hplsql/src/test/queries/local/return.sql        |    3 +
 hplsql/src/test/queries/local/seterror.sql      |   10 +
 hplsql/src/test/queries/local/sub.sql           |    1 +
 hplsql/src/test/queries/local/substr.sql        |    2 +
 hplsql/src/test/queries/local/substring.sql     |    8 +
 hplsql/src/test/queries/local/timestamp.sql     |    4 +
 hplsql/src/test/queries/local/timestamp_iso.sql |    2 +
 hplsql/src/test/queries/local/to_char.sql       |    1 +
 hplsql/src/test/queries/local/to_timestamp.sql  |    5 +
 hplsql/src/test/queries/local/trim.sql          |    1 +
 hplsql/src/test/queries/local/twopipes.sql      |    1 +
 hplsql/src/test/queries/local/upper.sql         |    1 +
 hplsql/src/test/queries/local/values_into.sql   |    6 +
 hplsql/src/test/queries/local/while.sql         |   20 +
 hplsql/src/test/results/local/add.out.txt       |    2 +
 hplsql/src/test/results/local/assign.out.txt    |    8 +
 hplsql/src/test/results/local/bool_expr.out.txt |   32 +
 hplsql/src/test/results/local/break.out.txt     |   29 +
 hplsql/src/test/results/local/case.out.txt      |   12 +
 hplsql/src/test/results/local/cast.out.txt      |    8 +
 hplsql/src/test/results/local/char.out.txt      |    1 +
 hplsql/src/test/results/local/coalesce.out.txt  |    4 +
 hplsql/src/test/results/local/concat.out.txt    |    2 +
 .../test/results/local/create_function.out.txt  |    9 +
 .../test/results/local/create_function2.out.txt |   10 +
 .../test/results/local/create_procedure.out.txt |    8 +
 hplsql/src/test/results/local/date.out.txt      |    4 +
 .../src/test/results/local/dbms_output.out.txt  |    3 +
 hplsql/src/test/results/local/declare.out.txt   |   13 +
 .../results/local/declare_condition.out.txt     |    7 +
 .../results/local/declare_condition2.out.txt    |   12 +
 hplsql/src/test/results/local/decode.out.txt    |   13 +
 hplsql/src/test/results/local/equal.out.txt     |   48 +
 hplsql/src/test/results/local/exception.out.txt |   13 +
 .../src/test/results/local/exception2.out.txt   |    5 +
 hplsql/src/test/results/local/exit.out.txt      |   42 +
 hplsql/src/test/results/local/expr.out.txt      |   29 +
 hplsql/src/test/results/local/for_range.out.txt |   65 +
 hplsql/src/test/results/local/if.out.txt        |   40 +
 hplsql/src/test/results/local/instr.out.txt     |   33 +
 hplsql/src/test/results/local/interval.out.txt  |   11 +
 hplsql/src/test/results/local/lang.out.txt      |   34 +
 hplsql/src/test/results/local/leave.out.txt     |   42 +
 hplsql/src/test/results/local/len.out.txt       |    1 +
 hplsql/src/test/results/local/length.out.txt    |    1 +
 hplsql/src/test/results/local/lower.out.txt     |    1 +
 hplsql/src/test/results/local/nvl.out.txt       |    4 +
 hplsql/src/test/results/local/nvl2.out.txt      |    2 +
 .../test/results/local/plhqlexception.out.txt   |    6 +
 .../test/results/local/plhqlexception1.out.txt  |   10 +
 .../test/results/local/plhqlexception2.out.txt  |  106 +
 hplsql/src/test/results/local/print.out.txt     |    6 +
 hplsql/src/test/results/local/return.out.txt    |    3 +
 .../results/local/select_conversion.out.txt     |    9 +
 hplsql/src/test/results/local/seterror.out.txt  |    6 +
 hplsql/src/test/results/local/sub.out.txt       |    1 +
 hplsql/src/test/results/local/substr.out.txt    |    2 +
 hplsql/src/test/results/local/substring.out.txt |    8 +
 hplsql/src/test/results/local/timestamp.out.txt |    4 +
 .../test/results/local/timestamp_iso.out.txt    |    2 +
 hplsql/src/test/results/local/to_char.out.txt   |    1 +
 .../src/test/results/local/to_timestamp.out.txt |    4 +
 hplsql/src/test/results/local/trim.out.txt      |    1 +
 hplsql/src/test/results/local/twopipes.out.txt  |    1 +
 hplsql/src/test/results/local/upper.out.txt     |    1 +
 .../src/test/results/local/values_into.out.txt  |   11 +
 hplsql/src/test/results/local/while.out.txt     |   72 +
 .../hive/metastore/TestMetaStoreMetrics.java    |   66 +-
 .../hive/beeline/TestBeeLineWithArgs.java       |   87 +
 .../test/resources/testconfiguration.properties |    2 +
 .../org/apache/hive/jdbc/HiveConnection.java    |   15 +-
 .../hive/metastore/AggregateStatsCache.java     |    5 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   74 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    7 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |   57 +-
 .../hive/metastore/MetaStoreDirectSql.java      |   56 +-
 .../hive/metastore/MetaStoreSchemaInfo.java     |    6 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |   26 -
 .../hadoop/hive/metastore/ObjectStore.java      | 1894 +++++-----
 .../hive/metastore/PartitionDropOptions.java    |    6 -
 .../hadoop/hive/metastore/ProtectMode.java      |   97 -
 .../hive/metastore/tools/HiveMetaTool.java      |   23 +-
 .../hadoop/hive/metastore/TestObjectStore.java  |  230 ++
 pom.xml                                         |    9 +-
 ql/pom.xml                                      |    5 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   34 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   22 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |    9 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |   76 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  207 +-
 .../hadoop/hive/ql/exec/FilterOperator.java     |    3 +-
 .../hive/ql/exec/mr/ExecMapperContext.java      |   10 +-
 .../ql/exec/spark/SparkReduceRecordHandler.java |   29 +-
 .../hive/ql/exec/tez/HashTableLoader.java       |   19 +-
 .../ql/exec/tez/MergeFileRecordProcessor.java   |   42 +-
 .../hive/ql/exec/tez/ReduceRecordSource.java    |   33 +-
 .../ql/exec/vector/VectorizedBatchUtil.java     |   41 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |    2 +-
 .../mapjoin/VectorMapJoinRowBytesContainer.java |    9 +-
 .../hadoop/hive/ql/hooks/HookContext.java       |   12 +
 .../hadoop/hive/ql/hooks/LineageInfo.java       |   96 +
 .../hadoop/hive/ql/hooks/LineageLogger.java     |  441 +++
 .../hadoop/hive/ql/hooks/WriteEntity.java       |    6 +-
 .../hadoop/hive/ql/io/AcidInputFormat.java      |   60 +-
 .../hadoop/hive/ql/io/AcidOutputFormat.java     |   49 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  152 +-
 .../hadoop/hive/ql/io/FileFormatException.java  |   30 +
 .../ql/io/HiveContextAwareRecordReader.java     |    2 +-
 .../hadoop/hive/ql/io/HiveFileFormatUtils.java  |   19 +-
 .../org/apache/hadoop/hive/ql/io/IOContext.java |   43 -
 .../apache/hadoop/hive/ql/io/IOContextMap.java  |   81 +
 .../apache/hadoop/hive/ql/io/orc/FileDump.java  |    7 +-
 .../hadoop/hive/ql/io/orc/MemoryManager.java    |   39 +-
 .../apache/hadoop/hive/ql/io/orc/OrcFile.java   |   15 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |   60 +-
 .../hive/ql/io/orc/OrcNewInputFormat.java       |   16 +-
 .../hadoop/hive/ql/io/orc/OrcNewSplit.java      |   13 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java      |   66 +-
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |   63 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  |   16 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |   19 +-
 .../hive/ql/io/orc/RecordReaderFactory.java     |    7 +-
 .../ql/io/orc/RunLengthIntegerReaderV2.java     |    8 +-
 .../hadoop/hive/ql/io/orc/WriterImpl.java       |   67 +-
 .../read/ParquetRecordReaderWrapper.java        |  102 +-
 .../ql/io/parquet/write/DataWritableWriter.java |  638 ++--
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |  343 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |   20 +-
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |    4 +
 .../hadoop/hive/ql/lockmgr/HiveLockObject.java  |   35 +
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |   50 +-
 .../hive/ql/lockmgr/HiveTxnManagerImpl.java     |  140 +
 .../ql/log/NoDeleteRollingFileAppender.java     |  176 +
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |    1 -
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   64 +-
 .../hadoop/hive/ql/metadata/HiveUtils.java      |    8 -
 .../hadoop/hive/ql/metadata/Partition.java      |   51 -
 .../apache/hadoop/hive/ql/metadata/Table.java   |   65 +-
 .../formatting/MetaDataFormatUtils.java         |   24 +-
 .../BucketingSortingReduceSinkOptimizer.java    |    4 +
 .../hive/ql/optimizer/ConstantPropagate.java    |    1 +
 .../ql/optimizer/ConstantPropagateProcCtx.java  |   10 +-
 .../optimizer/ConstantPropagateProcFactory.java |    4 +-
 .../optimizer/RemoveDynamicPruningBySize.java   |    2 +-
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |   25 +-
 .../ql/optimizer/calcite/HiveRelOptUtil.java    |   73 +-
 .../calcite/cost/HiveOnTezCostModel.java        |   25 +-
 .../calcite/reloperators/HiveJoin.java          |   25 +-
 .../calcite/reloperators/HiveMultiJoin.java     |   37 +-
 .../calcite/reloperators/HiveSemiJoin.java      |   57 +-
 .../rules/HiveInsertExchange4JoinRule.java      |   13 +-
 .../calcite/rules/HiveJoinAddNotNullRule.java   |   16 +-
 .../calcite/rules/HiveJoinCommuteRule.java      |   96 +
 .../HiveJoinPushTransitivePredicatesRule.java   |  139 +
 .../calcite/rules/HiveJoinToMultiJoinRule.java  |  106 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |    3 +-
 .../calcite/stats/HiveRelMdSelectivity.java     |   11 +-
 .../calcite/translator/ASTConverter.java        |   20 +-
 .../calcite/translator/ExprNodeConverter.java   |   17 +
 .../calcite/translator/HiveOpConverter.java     |  161 +-
 .../translator/HiveOpConverterPostProc.java     |   34 +-
 .../translator/PlanModifierForASTConv.java      |   12 +-
 .../ql/optimizer/lineage/ExprProcFactory.java   |   98 +
 .../hive/ql/optimizer/lineage/Generator.java    |   16 +-
 .../hive/ql/optimizer/lineage/LineageCtx.java   |   79 +-
 .../ql/optimizer/lineage/OpProcFactory.java     |  228 +-
 .../ql/optimizer/physical/MemoryDecider.java    |  288 ++
 .../ql/optimizer/physical/SerializeFilter.java  |  178 +
 .../stats/annotation/StatsRulesProcFactory.java |   42 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   78 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  112 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |   31 +-
 .../hadoop/hive/ql/parse/GenTezProcContext.java |    8 +
 .../hadoop/hive/ql/parse/GenTezUtils.java       |   59 +-
 .../apache/hadoop/hive/ql/parse/GenTezWork.java |   10 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |    5 -
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   38 +-
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   79 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   96 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |    8 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |   28 +-
 .../hive/ql/plan/AbstractOperatorDesc.java      |   14 +
 .../hadoop/hive/ql/plan/AlterTableDesc.java     |   20 +-
 .../apache/hadoop/hive/ql/plan/BaseWork.java    |    7 +
 .../hadoop/hive/ql/plan/DropTableDesc.java      |   26 +-
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |   27 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |   14 +
 .../hadoop/hive/ql/plan/HiveOperation.java      |    2 -
 .../apache/hadoop/hive/ql/plan/JoinDesc.java    |    4 +
 .../hadoop/hive/ql/plan/MergeJoinWork.java      |    8 +-
 .../hadoop/hive/ql/plan/OperatorDesc.java       |    2 +
 .../hadoop/hive/ql/plan/TableScanDesc.java      |   15 +-
 .../authorization/plugin/HiveAuthorizer.java    |   11 +
 .../plugin/HiveAuthorizerImpl.java              |   22 +
 .../authorization/plugin/HiveV1Authorizer.java  |   20 +
 .../hadoop/hive/ql/session/LineageState.java    |    9 +-
 .../hadoop/hive/ql/session/SessionState.java    |   18 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |   11 +
 .../hive/ql/txn/compactor/CompactorMR.java      |    4 +-
 .../ql/udf/generic/GenericUDAFCollectList.java  |    5 -
 .../ql/udf/generic/GenericUDAFCollectSet.java   |    5 -
 .../ql/udf/generic/GenericUDAFComputeStats.java |    4 +-
 .../hive/ql/udf/generic/GenericUDAFCount.java   |    4 -
 .../ql/udf/generic/GenericUDAFCovariance.java   |    4 -
 .../ql/udf/generic/GenericUDAFCumeDist.java     |    4 -
 .../ql/udf/generic/GenericUDAFDenseRank.java    |    4 -
 .../generic/GenericUDAFHistogramNumeric.java    |    2 +-
 .../generic/GenericUDAFPercentileApprox.java    |    2 +-
 .../udf/generic/NumDistinctValueEstimator.java  |   18 +
 .../hive/ql/udf/generic/NumericHistogram.java   |   14 +
 .../hadoop/hive/ql/util/JavaDataModel.java      |   30 -
 .../hive/ql/exec/TestFileSinkOperator.java      |    3 +-
 .../hadoop/hive/ql/exec/TestOperators.java      |    3 +-
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java |   73 +-
 .../ql/io/TestHiveBinarySearchRecordReader.java |    2 +-
 .../hadoop/hive/ql/io/TestIOContextMap.java     |  133 +
 .../hadoop/hive/ql/io/orc/TestFileDump.java     |   50 +
 .../hive/ql/io/orc/TestInputOutputFormat.java   |  114 +-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  |   57 +-
 .../hive/ql/io/orc/TestOrcRecordUpdater.java    |    6 +-
 .../hive/ql/io/sarg/TestSearchArgumentImpl.java |   32 +-
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |    2 +-
 .../hive/ql/txn/compactor/CompactorTest.java    |   20 +-
 .../hive/ql/txn/compactor/TestCleaner.java      |    8 +-
 .../hive/ql/txn/compactor/TestCleaner2.java     |   14 +
 .../hive/ql/txn/compactor/TestInitiator.java    |    4 +
 .../hive/ql/txn/compactor/TestWorker.java       |   49 +-
 .../hive/ql/txn/compactor/TestWorker2.java      |   16 +
 .../alter_partition_invalidspec.q               |    8 -
 .../clientnegative/alter_partition_nodrop.q     |    9 -
 .../alter_partition_nodrop_table.q              |    9 -
 .../clientnegative/alter_partition_offline.q    |   11 -
 .../clientnegative/drop_table_failure3.q        |   12 -
 .../queries/clientnegative/exchange_partition.q |   19 +
 .../queries/clientnegative/load_orc_negative1.q |    4 +
 .../queries/clientnegative/load_orc_negative2.q |    6 +
 .../clientnegative/load_orc_negative_part.q     |   14 +
 .../queries/clientnegative/protectmode_part.q   |   15 -
 .../queries/clientnegative/protectmode_part1.q  |   21 -
 .../queries/clientnegative/protectmode_part2.q  |    9 -
 .../clientnegative/protectmode_part_no_drop.q   |   10 -
 .../clientnegative/protectmode_part_no_drop2.q  |   11 -
 .../queries/clientnegative/protectmode_tbl1.q   |    8 -
 .../queries/clientnegative/protectmode_tbl2.q   |   12 -
 .../queries/clientnegative/protectmode_tbl3.q   |   10 -
 .../queries/clientnegative/protectmode_tbl4.q   |   15 -
 .../queries/clientnegative/protectmode_tbl5.q   |   15 -
 .../queries/clientnegative/protectmode_tbl6.q   |    8 -
 .../queries/clientnegative/protectmode_tbl7.q   |   13 -
 .../queries/clientnegative/protectmode_tbl8.q   |   13 -
 .../clientnegative/protectmode_tbl_no_drop.q    |    9 -
 .../test/queries/clientnegative/sa_fail_hook3.q |    4 -
 .../alter_partition_protect_mode.q              |   26 -
 .../queries/clientpositive/ba_table_union.q     |    2 +-
 .../queries/clientpositive/cbo_rp_auto_join0.q  |    1 +
 .../queries/clientpositive/cbo_rp_auto_join1.q  |    1 +
 .../test/queries/clientpositive/cbo_rp_join0.q  |    1 +
 .../test/queries/clientpositive/constprog_dpp.q |   17 +
 ql/src/test/queries/clientpositive/cp_sel.q     |   11 +
 .../drop_partitions_ignore_protection.q         |   10 -
 .../queries/clientpositive/encoding_nonutf8.q   |    7 +
 .../encryption_insert_partition_static.q        |   17 -
 .../queries/clientpositive/fouter_join_ppr.q    |   73 +
 .../insert_non_utf8_encoding_table.q            |   20 +
 ql/src/test/queries/clientpositive/lineage2.q   |  116 +
 ql/src/test/queries/clientpositive/lineage3.q   |  162 +
 ql/src/test/queries/clientpositive/load_orc.q   |   10 +
 .../test/queries/clientpositive/load_orc_part.q |   15 +
 ql/src/test/queries/clientpositive/mrr.q        |    2 +
 .../test/queries/clientpositive/protectmode.q   |   63 -
 .../test/queries/clientpositive/protectmode2.q  |   23 -
 .../queries/clientpositive/select_same_col.q    |    5 +-
 .../clientpositive/vectorization_part_varchar.q |    7 +
 .../resources/orc-file-dump-bloomfilter.out     |   92 +-
 .../resources/orc-file-dump-bloomfilter2.out    |   92 +-
 .../orc-file-dump-dictionary-threshold.out      |   76 +-
 ql/src/test/resources/orc-file-dump.json        |  108 +-
 ql/src/test/resources/orc-file-dump.out         |   84 +-
 ql/src/test/resources/orc-file-has-null.out     |   62 +-
 .../alter_numbuckets_partitioned_table.q.out    |    8 -
 .../results/beelinepositive/create_like.q.out   |    3 -
 .../results/beelinepositive/create_like2.q.out  |    1 -
 .../beelinepositive/create_like_view.q.out      |    4 -
 .../beelinepositive/create_skewed_table1.q.out  |    3 -
 .../results/beelinepositive/create_view.q.out   |   14 -
 .../create_view_partitioned.q.out               |    3 -
 ql/src/test/results/beelinepositive/ctas.q.out  |    5 -
 .../describe_formatted_view_partitioned.q.out   |    1 -
 .../beelinepositive/describe_table.q.out        |    3 -
 .../test/results/beelinepositive/merge3.q.out   |    1 -
 .../part_inherit_tbl_props.q.out                |    1 -
 .../part_inherit_tbl_props_empty.q.out          |    1 -
 .../part_inherit_tbl_props_with_star.q.out      |    1 -
 .../results/beelinepositive/protectmode2.q.out  |    2 -
 .../test/results/beelinepositive/stats1.q.out   |    2 -
 .../test/results/beelinepositive/stats10.q.out  |    3 -
 .../test/results/beelinepositive/stats11.q.out  |    4 -
 .../test/results/beelinepositive/stats12.q.out  |    5 -
 .../test/results/beelinepositive/stats13.q.out  |    6 -
 .../test/results/beelinepositive/stats14.q.out  |    5 -
 .../test/results/beelinepositive/stats15.q.out  |    5 -
 .../test/results/beelinepositive/stats16.q.out  |    2 -
 .../test/results/beelinepositive/stats18.q.out  |    2 -
 .../test/results/beelinepositive/stats2.q.out   |    2 -
 .../test/results/beelinepositive/stats3.q.out   |    2 -
 .../test/results/beelinepositive/stats4.q.out   |    6 -
 .../test/results/beelinepositive/stats5.q.out   |    1 -
 .../test/results/beelinepositive/stats6.q.out   |    5 -
 .../test/results/beelinepositive/stats7.q.out   |    3 -
 .../test/results/beelinepositive/stats8.q.out   |   10 -
 .../test/results/beelinepositive/stats9.q.out   |    1 -
 .../beelinepositive/stats_empty_partition.q.out |    1 -
 .../clientnegative/alter_file_format.q.out      |    1 -
 .../alter_view_as_select_with_partition.q.out   |    1 -
 .../clientnegative/exchange_partition.q.out     |   54 +
 .../clientnegative/load_orc_negative1.q.out     |    9 +
 .../clientnegative/load_orc_negative2.q.out     |   25 +
 .../clientnegative/load_orc_negative_part.q.out |   52 +
 .../stats_partialscan_autogether.q.out          |    2 -
 .../clientnegative/udf_assert_true.q.out        |   12 +-
 .../clientnegative/udf_assert_true2.q.out       |    6 +-
 .../clientpositive/alter_file_format.q.out      |   19 -
 .../clientpositive/alter_merge_orc.q.out        |   24 +-
 .../clientpositive/alter_merge_stats_orc.q.out  |   30 +-
 .../alter_numbuckets_partitioned_table.q.out    |   16 -
 .../alter_numbuckets_partitioned_table2.q.out   |   27 -
 ...lter_numbuckets_partitioned_table2_h23.q.out |   27 -
 ...alter_numbuckets_partitioned_table_h23.q.out |   16 -
 .../alter_partition_change_col.q.out            |    8 +-
 .../alter_partition_clusterby_sortby.q.out      |    7 -
 .../clientpositive/alter_skewed_table.q.out     |    6 -
 .../clientpositive/alter_table_cascade.q.out    |    8 +-
 .../clientpositive/alter_table_not_sorted.q.out |    2 -
 .../clientpositive/alter_table_serde2.q.out     |    6 -
 .../clientpositive/alter_view_as_select.q.out   |    3 -
 .../clientpositive/annotate_stats_groupby.q.out |   28 +-
 .../annotate_stats_groupby2.q.out               |    8 +-
 .../annotate_stats_join_pkfk.q.out              |   20 +-
 .../clientpositive/annotate_stats_part.q.out    |    6 +-
 .../clientpositive/annotate_stats_select.q.out  |   52 +-
 .../clientpositive/annotate_stats_table.q.out   |    4 +-
 .../clientpositive/authorization_index.q.out    |    1 -
 .../results/clientpositive/auto_join12.q.out    |   54 +-
 .../results/clientpositive/auto_join13.q.out    |   26 +-
 .../results/clientpositive/auto_join5.q.out     |    8 +-
 .../results/clientpositive/auto_join8.q.out     |    2 +-
 .../auto_join_without_localtask.q.out           |  218 +-
 .../results/clientpositive/ba_table_union.q.out |   16 +-
 .../test/results/clientpositive/bucket5.q.out   |    1 -
 .../clientpositive/cbo_rp_auto_join0.q.out      |   32 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      |  619 ++--
 .../results/clientpositive/cbo_rp_join0.q.out   |  185 +-
 .../test/results/clientpositive/cluster.q.out   |   20 +-
 .../test/results/clientpositive/combine2.q.out  |   16 +-
 .../constantPropagateForSubQuery.q.out          |   38 +-
 .../clientpositive/correlationoptimizer15.q.out |  120 +-
 .../clientpositive/correlationoptimizer6.q.out  | 1011 +++---
 ql/src/test/results/clientpositive/cp_sel.q.out |  195 ++
 .../create_alter_list_bucketing_table1.q.out    |    7 -
 .../results/clientpositive/create_like.q.out    |    9 -
 .../results/clientpositive/create_like2.q.out   |    1 -
 .../clientpositive/create_like_tbl_props.q.out  |    5 -
 .../clientpositive/create_like_view.q.out       |    4 -
 .../clientpositive/create_or_replace_view.q.out |    5 -
 .../clientpositive/create_skewed_table1.q.out   |    3 -
 .../results/clientpositive/create_view.q.out    |   14 -
 .../create_view_partitioned.q.out               |    3 -
 .../clientpositive/create_view_translate.q.out  |    2 -
 .../results/clientpositive/cross_join.q.out     |    8 +-
 ql/src/test/results/clientpositive/ctas.q.out   |    5 -
 .../results/clientpositive/ctas_colname.q.out   |    7 -
 .../results/clientpositive/ctas_hadoop20.q.out  |    5 -
 .../ctas_uses_database_location.q.out           |    1 -
 .../clientpositive/database_location.q.out      |    2 -
 .../results/clientpositive/decimal_serde.q.out  |    2 -
 .../clientpositive/default_file_format.q.out    |    5 -
 .../describe_comment_indent.q.out               |    1 -
 .../describe_comment_nonascii.q.out             |    1 -
 .../describe_formatted_view_partitioned.q.out   |    2 -
 .../clientpositive/describe_syntax.q.out        |    6 -
 .../results/clientpositive/describe_table.q.out |    7 -
 .../dynpart_sort_opt_vectorization.q.out        |   48 +-
 .../dynpart_sort_optimization.q.out             |   32 -
 .../dynpart_sort_optimization2.q.out            |   32 +-
 .../dynpart_sort_optimization_acid.q.out        |   60 +-
 .../clientpositive/encoding_nonutf8.q.out       |   36 +
 .../encryption_insert_partition_dynamic.q.out   |   18 +-
 .../encryption_insert_partition_static.q.out    |  739 +---
 .../encrypted/encryption_insert_values.q.out    |    1 -
 .../clientpositive/exim_hidden_files.q.out      |    1 -
 .../extrapolate_part_stats_full.q.out           |   24 +-
 .../extrapolate_part_stats_partial.q.out        |   76 +-
 .../extrapolate_part_stats_partial_ndv.q.out    |   38 +-
 .../clientpositive/fouter_join_ppr.q.out        | 1694 +++++++++
 .../clientpositive/groupby_sort_1_23.q.out      |   10 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |   10 +-
 .../clientpositive/index_auto_mult_tables.q.out |   12 +
 .../index_auto_mult_tables_compact.q.out        |    9 +
 .../clientpositive/index_auto_partitioned.q.out |    9 +
 .../clientpositive/index_auto_update.q.out      |    2 +
 .../results/clientpositive/index_bitmap.q.out   |   24 +
 .../index_bitmap_auto_partitioned.q.out         |   12 +
 .../clientpositive/index_bitmap_rc.q.out        |   24 +
 .../results/clientpositive/index_compact.q.out  |   18 +
 .../clientpositive/index_compact_2.q.out        |   18 +
 .../clientpositive/index_skewtable.q.out        |    1 -
 .../clientpositive/infer_bucket_sort.q.out      |   50 -
 .../infer_bucket_sort_bucketed_table.q.out      |    2 -
 .../infer_bucket_sort_convert_join.q.out        |    4 -
 .../infer_bucket_sort_dyn_part.q.out            |   16 -
 .../infer_bucket_sort_grouping_operators.q.out  |   12 -
 .../infer_bucket_sort_list_bucket.q.out         |    4 -
 .../infer_bucket_sort_map_operators.q.out       |    8 -
 .../infer_bucket_sort_merge.q.out               |    4 -
 .../infer_bucket_sort_multi_insert.q.out        |   16 -
 .../infer_bucket_sort_num_buckets.q.out         |    4 -
 .../infer_bucket_sort_reducers_power_two.q.out  |   12 -
 ql/src/test/results/clientpositive/input7.q.out |    2 +-
 .../results/clientpositive/input_part10.q.out   |    5 +-
 .../results/clientpositive/insert_into5.q.out   |    5 +-
 .../insert_non_utf8_encoding_table.q.out        |   89 +
 ql/src/test/results/clientpositive/join12.q.out |   28 +-
 ql/src/test/results/clientpositive/join13.q.out |   32 +-
 ql/src/test/results/clientpositive/join32.q.out |   36 +-
 .../clientpositive/join32_lessSize.q.out        |  118 +-
 ql/src/test/results/clientpositive/join33.q.out |   36 +-
 ql/src/test/results/clientpositive/join34.q.out |    2 +-
 ql/src/test/results/clientpositive/join35.q.out |    2 +-
 ql/src/test/results/clientpositive/join5.q.out  |   20 +-
 ql/src/test/results/clientpositive/join8.q.out  |    2 +-
 .../clientpositive/join_alt_syntax.q.out        |  104 +-
 .../clientpositive/join_cond_pushdown_1.q.out   |   42 +-
 .../clientpositive/join_cond_pushdown_2.q.out   |   62 +-
 .../clientpositive/join_cond_pushdown_3.q.out   |   42 +-
 .../clientpositive/join_cond_pushdown_4.q.out   |   62 +-
 .../results/clientpositive/join_merging.q.out   |  117 +-
 .../results/clientpositive/join_nulls.q.out     |    2 +-
 .../results/clientpositive/lateral_view.q.out   |   68 +-
 .../clientpositive/lateral_view_explode2.q.out  |    6 +-
 .../clientpositive/lateral_view_noalias.q.out   |   22 +-
 .../results/clientpositive/lb_fs_stats.q.out    |    2 -
 .../results/clientpositive/limit_pushdown.q.out |   98 +-
 .../test/results/clientpositive/lineage1.q.out  |    4 +-
 .../test/results/clientpositive/lineage2.q.out  | 2905 ++++++++++++++++
 .../test/results/clientpositive/lineage3.q.out  | 2482 +++++++++++++
 .../clientpositive/list_bucket_dml_1.q.out      |    4 -
 .../list_bucket_dml_10.q.java1.7.out            |    2 -
 .../list_bucket_dml_10.q.java1.8.out            |    2 -
 .../list_bucket_dml_11.q.java1.7.out            |    2 -
 .../list_bucket_dml_11.q.java1.8.out            |    2 -
 .../list_bucket_dml_12.q.java1.7.out            |    4 +-
 .../list_bucket_dml_12.q.java1.8.out            |    2 -
 .../list_bucket_dml_13.q.java1.7.out            |    4 +-
 .../list_bucket_dml_13.q.java1.8.out            |    2 -
 .../clientpositive/list_bucket_dml_14.q.out     |    1 -
 .../list_bucket_dml_2.q.java1.7.out             |    2 -
 .../list_bucket_dml_2.q.java1.8.out             |    2 -
 .../clientpositive/list_bucket_dml_3.q.out      |    2 -
 .../list_bucket_dml_4.q.java1.7.out             |    4 -
 .../list_bucket_dml_4.q.java1.8.out             |    4 -
 .../list_bucket_dml_5.q.java1.7.out             |    4 -
 .../list_bucket_dml_5.q.java1.8.out             |    4 -
 .../list_bucket_dml_6.q.java1.7.out             |    8 -
 .../list_bucket_dml_6.q.java1.8.out             |    8 -
 .../clientpositive/list_bucket_dml_7.q.out      |    8 -
 .../list_bucket_dml_8.q.java1.7.out             |    6 -
 .../list_bucket_dml_8.q.java1.8.out             |    6 -
 .../list_bucket_dml_9.q.java1.7.out             |    4 -
 .../list_bucket_dml_9.q.java1.8.out             |    4 -
 .../list_bucket_query_multiskew_1.q.out         |    2 -
 .../list_bucket_query_multiskew_2.q.out         |    2 -
 .../list_bucket_query_multiskew_3.q.out         |    6 -
 .../list_bucket_query_oneskew_1.q.out           |    2 -
 .../list_bucket_query_oneskew_2.q.out           |    2 -
 .../list_bucket_query_oneskew_3.q.out           |    2 -
 .../clientpositive/load_dyn_part13.q.out        |    8 +-
 .../clientpositive/load_dyn_part14.q.out        |   63 +-
 .../test/results/clientpositive/load_orc.q.out  |   43 +
 .../results/clientpositive/load_orc_part.q.out  |   70 +
 .../clientpositive/louter_join_ppr.q.out        |   74 +-
 .../clientpositive/mapjoin_mapjoin.q.out        |  120 +-
 ql/src/test/results/clientpositive/merge3.q.out |    1 -
 .../results/clientpositive/multiMapJoin1.q.out  |   10 +-
 .../results/clientpositive/multi_insert.q.out   |   32 +-
 ...i_insert_move_tasks_share_dependencies.q.out |   32 +-
 .../results/clientpositive/optional_outer.q.out |   36 +-
 .../results/clientpositive/orc_analyze.q.out    |   48 -
 .../results/clientpositive/orc_create.q.out     |    6 -
 .../orc_dictionary_threshold.q.out              |    2 +-
 .../clientpositive/outer_join_ppr.q.java1.7.out |  168 +-
 .../clientpositive/parallel_orderby.q.out       |    2 -
 .../parquet_array_null_element.q.out            |    1 -
 .../results/clientpositive/parquet_create.q.out |    1 -
 .../clientpositive/parquet_partitioned.q.out    |    1 -
 .../results/clientpositive/parquet_serde.q.out  |    5 -
 .../clientpositive/part_inherit_tbl_props.q.out |    2 -
 .../part_inherit_tbl_props_empty.q.out          |    2 -
 .../part_inherit_tbl_props_with_star.q.out      |    2 -
 .../partition_coltype_literals.q.out            |   16 -
 .../results/clientpositive/ppd_gby_join.q.out   |  104 +-
 .../test/results/clientpositive/ppd_join.q.out  |  106 +-
 .../test/results/clientpositive/ppd_join2.q.out |   88 +-
 .../test/results/clientpositive/ppd_join3.q.out |  114 +-
 .../clientpositive/ppd_outer_join4.q.out        |   88 +-
 .../results/clientpositive/ppd_random.q.out     |   80 +-
 .../results/clientpositive/ppd_udf_case.q.out   |   40 +-
 .../results/clientpositive/ppd_union_view.q.out |   78 +-
 .../results/clientpositive/protectmode2.q.out   |    2 -
 ql/src/test/results/clientpositive/ptf.q.out    |   28 +-
 .../clientpositive/rcfile_default_format.q.out  |    8 -
 .../clientpositive/rcfile_null_value.q.out      |   20 +-
 .../clientpositive/router_join_ppr.q.out        |  170 +-
 .../clientpositive/selectDistinctStar.q.out     |    2 -
 .../clientpositive/select_same_col.q.out        |    8 +-
 .../test/results/clientpositive/skewjoin.q.out  |   46 +-
 .../clientpositive/spark/alter_merge_orc.q.out  |   24 +-
 .../spark/alter_merge_stats_orc.q.out           |   30 +-
 .../clientpositive/spark/auto_join12.q.out      |   54 +-
 .../clientpositive/spark/auto_join13.q.out      |   40 +-
 .../clientpositive/spark/auto_join5.q.out       |   10 +-
 .../clientpositive/spark/auto_join8.q.out       |    2 +-
 .../spark/auto_join_without_localtask.q.out     |   90 +-
 .../results/clientpositive/spark/bucket5.q.out  |    1 -
 .../clientpositive/spark/cross_join.q.out       |    8 +-
 .../results/clientpositive/spark/ctas.q.out     |    5 -
 .../spark/groupby_sort_1_23.q.out               |   10 +-
 .../spark/groupby_sort_skew_1_23.q.out          |   10 +-
 .../infer_bucket_sort_bucketed_table.q.out      |    2 -
 .../spark/infer_bucket_sort_convert_join.q.out  |    4 -
 .../spark/infer_bucket_sort_map_operators.q.out |    8 -
 .../spark/infer_bucket_sort_merge.q.out         |    4 -
 .../spark/infer_bucket_sort_num_buckets.q.out   |    4 -
 .../infer_bucket_sort_reducers_power_two.q.out  |   12 -
 .../results/clientpositive/spark/join12.q.out   |   26 +-
 .../results/clientpositive/spark/join13.q.out   |   82 +-
 .../results/clientpositive/spark/join32.q.out   |  102 +-
 .../clientpositive/spark/join32_lessSize.q.out  |  232 +-
 .../results/clientpositive/spark/join33.q.out   |  102 +-
 .../results/clientpositive/spark/join34.q.out   |    2 +-
 .../results/clientpositive/spark/join35.q.out   |    2 +-
 .../results/clientpositive/spark/join5.q.out    |   20 +-
 .../results/clientpositive/spark/join8.q.out    |    2 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |  272 +-
 .../spark/join_cond_pushdown_1.q.out            |   70 +-
 .../spark/join_cond_pushdown_2.q.out            |  134 +-
 .../spark/join_cond_pushdown_3.q.out            |   70 +-
 .../spark/join_cond_pushdown_4.q.out            |  134 +-
 .../clientpositive/spark/join_merging.q.out     |  115 +-
 .../spark/lateral_view_explode2.q.out           |    6 +-
 .../clientpositive/spark/limit_pushdown.q.out   |   80 +-
 .../spark/list_bucket_dml_10.q.java1.7.out      |    2 -
 .../spark/list_bucket_dml_10.q.java1.8.out      |    2 -
 .../spark/list_bucket_dml_2.q.java1.7.out       |    2 -
 .../spark/list_bucket_dml_2.q.java1.8.out       |    2 -
 .../spark/list_bucket_dml_2.q.out               |  Bin 28747 -> 28667 bytes
 .../clientpositive/spark/load_dyn_part13.q.out  |    8 +-
 .../clientpositive/spark/load_dyn_part14.q.out  |   57 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |  172 +-
 .../clientpositive/spark/mapjoin_mapjoin.q.out  |  202 +-
 .../clientpositive/spark/multi_insert.q.out     |   32 +-
 ...i_insert_move_tasks_share_dependencies.q.out |   32 +-
 .../clientpositive/spark/orc_analyze.q.out      |   22 -
 .../spark/outer_join_ppr.q.java1.7.out          |  362 +-
 .../clientpositive/spark/parallel_orderby.q.out |    2 -
 .../clientpositive/spark/ppd_gby_join.q.out     |  106 +-
 .../results/clientpositive/spark/ppd_join.q.out |  110 +-
 .../clientpositive/spark/ppd_join2.q.out        |   86 +-
 .../clientpositive/spark/ppd_join3.q.out        |  116 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |  124 +-
 .../test/results/clientpositive/spark/ptf.q.out |   28 +-
 .../clientpositive/spark/router_join_ppr.q.out  |  370 +-
 .../results/clientpositive/spark/skewjoin.q.out |   46 +-
 .../results/clientpositive/spark/stats1.q.out   |    2 -
 .../results/clientpositive/spark/stats10.q.out  |    5 -
 .../results/clientpositive/spark/stats12.q.out  |    9 -
 .../results/clientpositive/spark/stats13.q.out  |   10 -
 .../results/clientpositive/spark/stats14.q.out  |    7 -
 .../results/clientpositive/spark/stats15.q.out  |    7 -
 .../results/clientpositive/spark/stats16.q.out  |    2 -
 .../results/clientpositive/spark/stats18.q.out  |    4 -
 .../results/clientpositive/spark/stats2.q.out   |    2 -
 .../results/clientpositive/spark/stats20.q.out  |    2 -
 .../results/clientpositive/spark/stats3.q.out   |    2 -
 .../results/clientpositive/spark/stats5.q.out   |    1 -
 .../results/clientpositive/spark/stats6.q.out   |    9 -
 .../results/clientpositive/spark/stats7.q.out   |    5 -
 .../results/clientpositive/spark/stats8.q.out   |   18 -
 .../results/clientpositive/spark/stats9.q.out   |    1 -
 .../clientpositive/spark/stats_counter.q.out    |    2 -
 .../spark/stats_counter_partitioned.q.out       |   16 -
 .../clientpositive/spark/stats_noscan_1.q.out   |   17 -
 .../clientpositive/spark/stats_noscan_2.q.out   |    6 -
 .../clientpositive/spark/stats_only_null.q.out  |    4 -
 .../spark/stats_partscan_1_23.q.out             |    6 -
 .../results/clientpositive/spark/statsfs.q.out  |   14 -
 .../results/clientpositive/spark/union22.q.out  |    4 +-
 .../results/clientpositive/spark/union28.q.out  |    4 +-
 .../results/clientpositive/spark/union29.q.out  |    4 +-
 .../results/clientpositive/spark/union30.q.out  |    4 +-
 .../results/clientpositive/spark/union33.q.out  |    4 +-
 .../clientpositive/spark/union_date_trim.q.out  |    4 +-
 .../clientpositive/spark/union_remove_1.q.out   |    5 +-
 .../clientpositive/spark/union_remove_10.q.out  |    3 +-
 .../clientpositive/spark/union_remove_11.q.out  |    3 +-
 .../clientpositive/spark/union_remove_12.q.out  |    3 +-
 .../clientpositive/spark/union_remove_13.q.out  |    1 -
 .../clientpositive/spark/union_remove_14.q.out  |    3 +-
 .../clientpositive/spark/union_remove_15.q.out  |    9 +-
 .../clientpositive/spark/union_remove_16.q.out  |    9 +-
 .../clientpositive/spark/union_remove_17.q.out  |    5 +-
 .../clientpositive/spark/union_remove_18.q.out  |   25 +-
 .../clientpositive/spark/union_remove_19.q.out  |   13 +-
 .../clientpositive/spark/union_remove_2.q.out   |    3 +-
 .../clientpositive/spark/union_remove_20.q.out  |    5 +-
 .../clientpositive/spark/union_remove_21.q.out  |    3 +-
 .../clientpositive/spark/union_remove_22.q.out  |   13 +-
 .../clientpositive/spark/union_remove_23.q.out  |    3 +-
 .../clientpositive/spark/union_remove_24.q.out  |    5 +-
 .../clientpositive/spark/union_remove_25.q.out  |   22 +-
 .../clientpositive/spark/union_remove_3.q.out   |    3 +-
 .../clientpositive/spark/union_remove_4.q.out   |    5 +-
 .../clientpositive/spark/union_remove_5.q.out   |    3 +-
 .../clientpositive/spark/union_remove_6.q.out   |    8 +-
 .../spark/union_remove_6_subq.q.out             |    8 +-
 .../clientpositive/spark/union_remove_7.q.out   |    5 +-
 .../clientpositive/spark/union_remove_8.q.out   |    3 +-
 .../clientpositive/spark/union_remove_9.q.out   |    3 +-
 .../clientpositive/spark/union_top_level.q.out  |  358 +-
 .../clientpositive/spark/vectorized_ptf.q.out   |  136 +-
 .../clientpositive/spark/windowing.q.out        |   40 +-
 ql/src/test/results/clientpositive/stats1.q.out |    2 -
 .../test/results/clientpositive/stats10.q.out   |    5 -
 .../test/results/clientpositive/stats11.q.out   |    8 -
 .../test/results/clientpositive/stats12.q.out   |    9 -
 .../test/results/clientpositive/stats13.q.out   |   10 -
 .../test/results/clientpositive/stats14.q.out   |    7 -
 .../test/results/clientpositive/stats15.q.out   |    7 -
 .../test/results/clientpositive/stats16.q.out   |    2 -
 .../test/results/clientpositive/stats18.q.out   |    4 -
 .../test/results/clientpositive/stats19.q.out   |   12 -
 ql/src/test/results/clientpositive/stats2.q.out |    2 -
 .../test/results/clientpositive/stats20.q.out   |    2 -
 ql/src/test/results/clientpositive/stats3.q.out |    2 -
 ql/src/test/results/clientpositive/stats4.q.out |   10 -
 ql/src/test/results/clientpositive/stats5.q.out |    1 -
 ql/src/test/results/clientpositive/stats6.q.out |    9 -
 ql/src/test/results/clientpositive/stats7.q.out |    5 -
 ql/src/test/results/clientpositive/stats8.q.out |   18 -
 ql/src/test/results/clientpositive/stats9.q.out |    1 -
 .../results/clientpositive/stats_counter.q.out  |    2 -
 .../stats_counter_partitioned.q.out             |   16 -
 .../clientpositive/stats_empty_partition.q.out  |    2 -
 .../clientpositive/stats_invalidation.q.out     |    2 -
 .../stats_list_bucket.q.java1.7.out             |    3 -
 .../stats_list_bucket.q.java1.8.out             |    3 -
 .../results/clientpositive/stats_noscan_1.q.out |   17 -
 .../results/clientpositive/stats_noscan_2.q.out |    6 -
 .../clientpositive/stats_only_null.q.out        |    4 -
 .../clientpositive/stats_partscan_1.q.out       |    6 -
 .../clientpositive/stats_partscan_1_23.q.out    |    6 -
 .../test/results/clientpositive/statsfs.q.out   |   14 -
 .../temp_table_windowing_expressions.q.out      |    8 +-
 .../clientpositive/tez/alter_merge_orc.q.out    |   24 +-
 .../tez/alter_merge_stats_orc.q.out             |   30 +-
 .../clientpositive/tez/constprog_dpp.q.out      |  113 +
 .../results/clientpositive/tez/cross_join.q.out |    8 +-
 .../test/results/clientpositive/tez/ctas.q.out  |    5 -
 .../tez/dynpart_sort_opt_vectorization.q.out    |   48 +-
 .../tez/dynpart_sort_optimization.q.out         |   32 -
 .../tez/dynpart_sort_optimization2.q.out        |   32 +-
 .../clientpositive/tez/explainuser_1.q.out      | 1729 +++++----
 .../clientpositive/tez/explainuser_2.q.out      | 3270 +++++++++---------
 .../results/clientpositive/tez/insert1.q.out    |  392 +--
 .../clientpositive/tez/limit_pushdown.q.out     |   78 +-
 .../clientpositive/tez/mapjoin_mapjoin.q.out    |  266 +-
 .../test/results/clientpositive/tez/mrr.q.out   |   82 +-
 .../clientpositive/tez/orc_analyze.q.out        |   48 -
 .../results/clientpositive/tez/orc_merge9.q.out |  186 +
 .../test/results/clientpositive/tez/ptf.q.out   |   28 +-
 .../clientpositive/tez/selectDistinctStar.q.out |    2 -
 .../results/clientpositive/tez/skewjoin.q.out   |   26 +-
 .../clientpositive/tez/stats_counter.q.out      |    2 -
 .../tez/stats_counter_partitioned.q.out         |   16 -
 .../clientpositive/tez/stats_noscan_1.q.out     |   17 -
 .../clientpositive/tez/stats_only_null.q.out    |    4 -
 .../results/clientpositive/tez/tez_fsstat.q.out |    2 -
 .../results/clientpositive/tez/tez_union.q.out  |   88 +-
 .../tez/tez_union_dynamic_partition.q.out       |   44 +-
 .../tez/tez_union_multiinsert.q.out             |    4 +-
 .../clientpositive/tez/unionDistinct_1.q.out    |   20 +-
 .../clientpositive/tez/vector_coalesce.q.out    |    6 +-
 .../clientpositive/tez/vector_decimal_2.q.out   |   55 +-
 .../tez/vector_decimal_round_2.q.out            |    8 +-
 .../clientpositive/tez/vector_interval_1.q.out  |   24 +-
 .../clientpositive/tez/vectorized_ptf.q.out     |  136 +-
 .../clientpositive/truncate_column.q.out        |   11 -
 .../results/clientpositive/udtf_stack.q.out     |   12 +-
 .../clientpositive/unicode_notation.q.out       |    3 -
 .../test/results/clientpositive/union22.q.out   |    4 +-
 .../test/results/clientpositive/union28.q.out   |    4 +-
 .../test/results/clientpositive/union29.q.out   |    4 +-
 .../test/results/clientpositive/union30.q.out   |    4 +-
 .../test/results/clientpositive/union33.q.out   |    4 +-
 .../clientpositive/unionDistinct_1.q.out        |   20 +-
 .../clientpositive/union_date_trim.q.out        |    4 +-
 .../results/clientpositive/union_remove_1.q.out |    5 +-
 .../clientpositive/union_remove_10.q.out        |    3 +-
 .../clientpositive/union_remove_11.q.out        |    3 +-
 .../clientpositive/union_remove_12.q.out        |    3 +-
 .../clientpositive/union_remove_13.q.out        |    1 -
 .../clientpositive/union_remove_14.q.out        |    3 +-
 .../clientpositive/union_remove_15.q.out        |    9 +-
 .../clientpositive/union_remove_16.q.out        |    9 +-
 .../clientpositive/union_remove_17.q.out        |    5 +-
 .../clientpositive/union_remove_18.q.out        |   25 +-
 .../clientpositive/union_remove_19.q.out        |   13 +-
 .../results/clientpositive/union_remove_2.q.out |    3 +-
 .../clientpositive/union_remove_20.q.out        |    5 +-
 .../clientpositive/union_remove_21.q.out        |    3 +-
 .../clientpositive/union_remove_22.q.out        |   13 +-
 .../clientpositive/union_remove_23.q.out        |    3 +-
 .../clientpositive/union_remove_24.q.out        |    5 +-
 .../clientpositive/union_remove_25.q.out        |   22 +-
 .../results/clientpositive/union_remove_3.q.out |    3 +-
 .../results/clientpositive/union_remove_4.q.out |    5 +-
 .../results/clientpositive/union_remove_5.q.out |    3 +-
 .../results/clientpositive/union_remove_6.q.out |    8 +-
 .../clientpositive/union_remove_6_subq.q.out    |    8 +-
 .../results/clientpositive/union_remove_7.q.out |    5 +-
 .../results/clientpositive/union_remove_8.q.out |    3 +-
 .../results/clientpositive/union_remove_9.q.out |    3 +-
 .../clientpositive/union_top_level.q.out        |  316 +-
 .../clientpositive/vector_coalesce.q.out        |    6 +-
 .../clientpositive/vector_decimal_2.q.out       |   55 +-
 .../clientpositive/vector_decimal_round_2.q.out |    8 +-
 .../clientpositive/vector_interval_1.q.out      |   24 +-
 .../vectorization_part_varchar.q.out            |   72 +
 .../results/clientpositive/vectorized_ptf.q.out |  132 +-
 .../test/results/clientpositive/windowing.q.out |   40 +-
 .../clientpositive/windowing_expressions.q.out  |    8 +-
 .../hadoop/hive/ql/io/sarg/ExpressionTree.java  |  157 +
 .../hadoop/hive/ql/io/sarg/SearchArgument.java  |   14 +-
 .../hive/serde2/AbstractEncodingAwareSerDe.java |    7 +-
 .../apache/hadoop/hive/serde2/SerDeUtils.java   |    4 +-
 .../hive/serde2/avro/AvroDeserializer.java      |   11 +-
 .../objectinspector/ObjectInspectorUtils.java   |   18 +-
 .../hive/serde2/avro/TestAvroDeserializer.java  |   63 +-
 .../avro/TestAvroObjectInspectorGenerator.java  |   33 +
 .../TestObjectInspectorUtils.java               |   22 +
 .../service/cli/session/HiveSessionBase.java    |    2 -
 .../service/cli/session/HiveSessionImpl.java    |   74 +-
 .../cli/session/HiveSessionImplwithUGI.java     |   63 +-
 .../service/cli/thrift/ThriftCLIService.java    |   21 +-
 .../apache/hive/service/server/HiveServer2.java |    6 +-
 .../apache/hive/service/cli/CLIServiceTest.java |  135 +-
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |    5 +-
 906 files changed, 41224 insertions(+), 14620 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e6adedc1/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6adedc1/beeline/src/java/org/apache/hive/beeline/Commands.java
----------------------------------------------------------------------
diff --cc beeline/src/java/org/apache/hive/beeline/Commands.java
index 8c406a3,3cdcfb8..01349e2
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@@ -46,8 -43,8 +46,9 @@@ import java.sql.ResultSet
  import java.sql.SQLException;
  import java.sql.Statement;
  import java.sql.SQLWarning;
+ import java.util.ArrayList;
  import java.util.Arrays;
 +import java.util.HashMap;
  import java.util.Iterator;
  import java.util.LinkedList;
  import java.util.List;
@@@ -1091,27 -798,42 +1093,36 @@@ public class Commands 
      }
  
      line = line.trim();
-     String[] cmds;
+     List<String> cmdList = new ArrayList<String>();
      if (entireLineAsCommand) {
-       cmds = new String[1];
-       cmds[0] = line;
+       cmdList.add(line);
      } else {
-       cmds = line.split(";");
+       StringBuffer command = new StringBuffer();
+       for (String cmdpart: line.split(";")) {
+         if (cmdpart.endsWith("\\")) {
+           command.append(cmdpart.substring(0, cmdpart.length() -1)).append(";");
+           continue;
+         } else {
+           command.append(cmdpart);
+         }
+         cmdList.add(command.toString());
+         command.setLength(0);
+       }
      }
-     for (int i = 0; i < cmds.length; i++) {
-       String sql = cmds[i].trim();
+     for (int i = 0; i < cmdList.size(); i++) {
+       String sql = cmdList.get(i).trim();
        if (sql.length() != 0) {
 -        if (beeLine.isComment(sql)) {
 -          //skip this and rest cmds in the line
 -          break;
 -        }
 -        if (sql.startsWith(BeeLine.COMMAND_PREFIX)) {
 -          sql = sql.substring(1);
 -        }
 -
 -        String prefix = call ? "call" : "sql";
 -
 -        if (sql.startsWith(prefix)) {
 -          sql = sql.substring(prefix.length());
 +        if (!beeLine.isBeeLine()) {
 +          sql = cliToBeelineCmd(sql);
 +          if (sql.equalsIgnoreCase("quit") || sql.equalsIgnoreCase("exit")) {
 +            beeLine.setExit(true);
 +            return true;
 +          }
          }
  
 -        // batch statements?
 -        if (beeLine.getBatch() != null) {
 -          beeLine.getBatch().add(sql);
 +        // if this is a "source" command, execute the referenced script file
 +        if (isSourceCMD(sql)) {
 +          sourceFile(sql);
            continue;
          }
  

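The hunk above changes how BeeLine splits a line into commands: instead of a blind line.split(";"), segments whose trailing backslash escaped the semicolon are rejoined before being dispatched. A minimal standalone sketch of that accumulation loop follows; the class name and main() driver are illustrative only, not BeeLine API, though the StringBuffer loop mirrors the patch.

    import java.util.ArrayList;
    import java.util.List;

    public class SemicolonSplitSketch {
      /** Splits a line on ';' while treating a trailing '\' as an escape. */
      static List<String> splitCommands(String line) {
        List<String> cmdList = new ArrayList<String>();
        StringBuffer command = new StringBuffer();
        for (String cmdpart : line.split(";")) {
          if (cmdpart.endsWith("\\")) {
            // split(";") consumed the escaped ';' -- restore it and keep accumulating
            command.append(cmdpart.substring(0, cmdpart.length() - 1)).append(";");
            continue;
          }
          command.append(cmdpart);
          cmdList.add(command.toString());
          command.setLength(0);
        }
        return cmdList;
      }

      public static void main(String[] args) {
        // prints: [select 'a;b' from t, select 2]
        System.out.println(splitCommands("select 'a\\;b' from t;select 2"));
      }
    }

As in the patch, a segment ending in an escaped semicolon is folded into the next segment, so quoted semicolons survive the split.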
http://git-wip-us.apache.org/repos/asf/hive/blob/e6adedc1/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------


[11/50] [abbrv] hive git commit: HIVE-4239 : Remove lock on compilation stage (Sergey Shelukhin, reviewed by Thejas M Nair)

Posted by xu...@apache.org.
HIVE-4239 : Remove lock on compilation stage (Sergey Shelukhin, reviewed by Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/be89eac6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/be89eac6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/be89eac6

Branch: refs/heads/beeline-cli
Commit: be89eac6e119f8aac09782da96b00f4b9a4b062c
Parents: 08595ff
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Jul 9 11:14:43 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Jul 9 11:14:43 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  31 +++--
 .../optimizer/RemoveDynamicPruningBySize.java   |   2 +-
 .../hadoop/hive/ql/parse/GenTezProcContext.java |   8 ++
 .../hadoop/hive/ql/parse/GenTezUtils.java       |  59 +++-----
 .../apache/hadoop/hive/ql/parse/GenTezWork.java |  10 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |  14 +-
 .../hadoop/hive/ql/session/SessionState.java    |   8 ++
 .../service/cli/session/HiveSessionImpl.java    |  61 ++++++---
 .../cli/session/HiveSessionImplwithUGI.java     |   3 +-
 .../apache/hive/service/cli/CLIServiceTest.java | 135 +++++++++++++++++--
 11 files changed, 245 insertions(+), 88 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 4549105..39477d6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1789,6 +1789,8 @@ public class HiveConf extends Configuration {
         "Transport mode of HiveServer2."),
     HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "",
         "Bind host on which to run the HiveServer2 Thrift service."),
+    HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" +
+        "enable parallel compilation between sessions on HiveServer2. The default is false."),
 
     // http (over thrift) transport settings
     HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001,

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index e04165b..934cb42 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -125,12 +126,11 @@ public class Driver implements CommandProcessor {
   static final private Log LOG = LogFactory.getLog(CLASS_NAME);
   static final private LogHelper console = new LogHelper(LOG);
 
-  private static final Object compileMonitor = new Object();
-
   private int maxRows = 100;
   ByteStream.Output bos = new ByteStream.Output();
 
-  private HiveConf conf;
+  private final HiveConf conf;
+  private final boolean isParallelEnabled;
   private DataInput resStream;
   private Context ctx;
   private DriverContext driverCxt;
@@ -193,7 +193,7 @@ public class Driver implements CommandProcessor {
   /**
    * Get a Schema with fields represented with native Hive types
    */
-  public static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) {
+  private static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) {
     Schema schema = null;
 
     // If we have a plan, prefer its logical result schema if it's
@@ -284,6 +284,8 @@ public class Driver implements CommandProcessor {
    */
   public Driver(HiveConf conf) {
     this.conf = conf;
+    isParallelEnabled = (conf != null)
+        && HiveConf.getBoolVar(conf, ConfVars.HIVE_SERVER2_PARALLEL_COMPILATION);
   }
 
   public Driver(HiveConf conf, String userName) {
@@ -292,9 +294,9 @@ public class Driver implements CommandProcessor {
   }
 
   public Driver() {
-    if (SessionState.get() != null) {
-      conf = SessionState.get().getConf();
-    }
+    conf = (SessionState.get() != null) ? SessionState.get().getConf() : null;
+    isParallelEnabled = (conf != null)
+        && HiveConf.getBoolVar(conf, ConfVars.HIVE_SERVER2_PARALLEL_COMPILATION);
   }
 
   /**
@@ -1118,10 +1120,23 @@ public class Driver implements CommandProcessor {
     return createProcessorResponse(compileInternal(command));
   }
 
+  private static final ReentrantLock globalCompileLock = new ReentrantLock();
   private int compileInternal(String command) {
+    boolean isParallelEnabled = SessionState.get().isHiveServerQuery() && this.isParallelEnabled;
     int ret;
-    synchronized (compileMonitor) {
+    final ReentrantLock compileLock = isParallelEnabled
+        ? SessionState.get().getCompileLock() : globalCompileLock;
+    compileLock.lock();
+    try {
+      if (isParallelEnabled && LOG.isDebugEnabled()) {
+        LOG.debug("Entering compile: " + command);
+      }
       ret = compile(command);
+      if (isParallelEnabled && LOG.isDebugEnabled()) {
+        LOG.debug("Done with compile: " + command);
+      }
+    } finally {
+      compileLock.unlock();
     }
     if (ret != 0) {
       try {

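The compileInternal() change above is the heart of HIVE-4239: when hive.driver.parallel.compilation is enabled for a HiveServer2 query, compilation serializes on a per-session ReentrantLock instead of one process-wide lock, so different sessions can compile concurrently. A condensed sketch of just the lock-selection pattern, with SessionLike standing in (hypothetically) for SessionState and its getCompileLock():

    import java.util.concurrent.locks.ReentrantLock;

    class CompileGate {
      private static final ReentrantLock GLOBAL_LOCK = new ReentrantLock();

      /** Stand-in for the session object; one lock per session. */
      interface SessionLike {
        ReentrantLock getCompileLock();
      }

      /**
       * Runs the compile step under the session's own lock when parallel
       * compilation is enabled, else under the single process-wide lock.
       */
      static void compileWithLock(SessionLike session, boolean parallelEnabled,
                                  Runnable compileStep) {
        ReentrantLock lock = parallelEnabled ? session.getCompileLock() : GLOBAL_LOCK;
        lock.lock();
        try {
          compileStep.run();   // at most one compile at a time per lock
        } finally {
          lock.unlock();
        }
      }
    }

Either way at most one compilation runs per lock; the flag only widens or narrows the lock's scope, which is why the patch can drop the old static compileMonitor without loosening the per-session guarantee.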
http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
index 5d01311..1567326 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
@@ -56,7 +56,7 @@ public class RemoveDynamicPruningBySize implements NodeProcessor {
         (context.pruningOpsRemovedByPriorOpt.isEmpty() ||
          !context.pruningOpsRemovedByPriorOpt.contains(event))) {
       context.pruningOpsRemovedByPriorOpt.add(event);
-      GenTezUtils.getUtils().removeBranch(event);
+      GenTezUtils.removeBranch(event);
       // at this point we've found the fork in the op pipeline that has the pruning as a child plan.
       LOG.info("Disabling dynamic pruning for: "
           + ((DynamicPruningEventDesc) desc).getTableScan().getName()

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
index adc31ae..f474eae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
@@ -82,6 +82,9 @@ public class GenTezProcContext implements NodeProcessorCtx{
   // walk.
   public Operator<? extends OperatorDesc> parentOfRoot;
 
+  // sequence number is used to name vertices (e.g.: Map 1, Reduce 14, ...)
+  private int sequenceNumber = 0;
+
   // tez task we're currently processing
   public TezTask currentTask;
 
@@ -188,4 +191,9 @@ public class GenTezProcContext implements NodeProcessorCtx{
 
     rootTasks.add(currentTask);
   }
+
+  /** Not thread-safe. */
+  public int nextSequenceNumber() {
+    return ++sequenceNumber;
+  }
 }

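Moving the counter from the old GenTezUtils singleton into GenTezProcContext makes vertex numbering local to one query compilation, so concurrent compilations can no longer interleave each other's numbers. A toy illustration of the naming scheme, using a hypothetical NamingDemo class rather than Hive code:

  // Toy mirror of nextSequenceNumber() above; not Hive code.
  class NamingDemo {
    private int sequenceNumber = 0; // scoped to one compilation, like the context

    int nextSequenceNumber() {
      return ++sequenceNumber;
    }

    public static void main(String[] args) {
      NamingDemo ctx = new NamingDemo();
      System.out.println("Map " + ctx.nextSequenceNumber());    // Map 1
      System.out.println("Reduce " + ctx.nextSequenceNumber()); // Reduce 2
      System.out.println("Union " + ctx.nextSequenceNumber());  // Union 3
    }
  }

As the /** Not thread-safe. */ comment notes, a single compilation runs on one thread, so the bare int needs no further synchronization.
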
http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
index 11c1df6..93ad145 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
@@ -61,42 +61,27 @@ import com.google.common.collect.HashBiMap;
 import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.AUTOPARALLEL;
 
 /**
- * GenTezUtils is a collection of shared helper methods to produce
- * TezWork
+ * GenTezUtils is a collection of shared helper methods to produce TezWork.
+ * All the methods in this class should be static, but some aren't; this is to facilitate testing.
+ * Methods are made non-static on an as-needed basis.
  */
 public class GenTezUtils {
+  static final private Log LOG = LogFactory.getLog(GenTezUtils.class);
 
-  static final private Log LOG = LogFactory.getLog(GenTezUtils.class.getName());
-
-  // sequence number is used to name vertices (e.g.: Map 1, Reduce 14, ...)
-  private int sequenceNumber = 0;
-
-  // singleton
-  private static GenTezUtils utils;
-
-  public static GenTezUtils getUtils() {
-    if (utils == null) {
-      utils = new GenTezUtils();
-    }
-    return utils;
+  public GenTezUtils() {
   }
 
-  protected GenTezUtils() {
-  }
-
-  public void resetSequenceNumber() {
-    sequenceNumber = 0;
-  }
-
-  public UnionWork createUnionWork(GenTezProcContext context, Operator<?> root, Operator<?> leaf, TezWork tezWork) {
-    UnionWork unionWork = new UnionWork("Union "+ (++sequenceNumber));
+  public static UnionWork createUnionWork(
+      GenTezProcContext context, Operator<?> root, Operator<?> leaf, TezWork tezWork) {
+    UnionWork unionWork = new UnionWork("Union "+ context.nextSequenceNumber());
     context.rootUnionWorkMap.put(root, unionWork);
     context.unionWorkMap.put(leaf, unionWork);
     tezWork.add(unionWork);
     return unionWork;
   }
 
-  public ReduceWork createReduceWork(GenTezProcContext context, Operator<?> root, TezWork tezWork) {
+  public static ReduceWork createReduceWork(
+      GenTezProcContext context, Operator<?> root, TezWork tezWork) {
     assert !root.getParentOperators().isEmpty();
 
     boolean isAutoReduceParallelism =
@@ -107,7 +92,7 @@ public class GenTezUtils {
     float minPartitionFactor = context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MIN_PARTITION_FACTOR);
     long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER);
 
-    ReduceWork reduceWork = new ReduceWork(Utilities.REDUCENAME + (++sequenceNumber));
+    ReduceWork reduceWork = new ReduceWork(Utilities.REDUCENAME + context.nextSequenceNumber());
     LOG.debug("Adding reduce work (" + reduceWork.getName() + ") for " + root);
     reduceWork.setReducer(root);
     reduceWork.setNeedsTagging(GenMapRedUtils.needsTagging(reduceWork));
@@ -161,8 +146,8 @@ public class GenTezUtils {
     return reduceWork;
   }
 
-  protected void setupReduceSink(GenTezProcContext context, ReduceWork reduceWork,
-      ReduceSinkOperator reduceSink) {
+  private static void setupReduceSink(
+      GenTezProcContext context, ReduceWork reduceWork, ReduceSinkOperator reduceSink) {
 
     LOG.debug("Setting up reduce sink: " + reduceSink
         + " with following reduce work: " + reduceWork.getName());
@@ -182,7 +167,7 @@ public class GenTezUtils {
   public MapWork createMapWork(GenTezProcContext context, Operator<?> root,
       TezWork tezWork, PrunedPartitionList partitions) throws SemanticException {
     assert root.getParentOperators().isEmpty();
-    MapWork mapWork = new MapWork(Utilities.MAPNAME + (++sequenceNumber));
+    MapWork mapWork = new MapWork(Utilities.MAPNAME + context.nextSequenceNumber());
     LOG.debug("Adding map work (" + mapWork.getName() + ") for " + root);
 
     // map work starts with table scan operators
@@ -213,7 +198,7 @@ public class GenTezUtils {
   }
 
   // removes any union operator and clones the plan
-  public void removeUnionOperators(Configuration conf, GenTezProcContext context,
+  public static void removeUnionOperators(Configuration conf, GenTezProcContext context,
       BaseWork work)
     throws SemanticException {
 
@@ -354,7 +339,7 @@ public class GenTezUtils {
     work.replaceRoots(replacementMap);
   }
 
-  public void processFileSink(GenTezProcContext context, FileSinkOperator fileSink)
+  public static void processFileSink(GenTezProcContext context, FileSinkOperator fileSink)
       throws SemanticException {
 
     ParseContext parseContext = context.parseContext;
@@ -393,8 +378,8 @@ public class GenTezUtils {
    * @param procCtx
    * @param event
    */
-  public void processAppMasterEvent(GenTezProcContext procCtx, AppMasterEventOperator event) {
-
+  public static void processAppMasterEvent(
+      GenTezProcContext procCtx, AppMasterEventOperator event) {
     if (procCtx.abandonedEventOperatorSet.contains(event)) {
       // don't need this anymore
       return;
@@ -444,7 +429,7 @@ public class GenTezUtils {
   /**
    * getEnclosingWork finds the BaseWork any given operator belongs to.
    */
-  public BaseWork getEnclosingWork(Operator<?> op, GenTezProcContext procCtx) {
+  public static BaseWork getEnclosingWork(Operator<?> op, GenTezProcContext procCtx) {
     List<Operator<?>> ops = new ArrayList<Operator<?>>();
     findRoots(op, ops);
     for (Operator<?> r : ops) {
@@ -459,7 +444,7 @@ public class GenTezUtils {
   /*
    * findRoots returns all root operators (in ops) that result in operator op
    */
-  private void findRoots(Operator<?> op, List<Operator<?>> ops) {
+  private static void findRoots(Operator<?> op, List<Operator<?>> ops) {
     List<Operator<?>> parents = op.getParentOperators();
     if (parents == null || parents.isEmpty()) {
       ops.add(op);
@@ -474,7 +459,7 @@ public class GenTezUtils {
    * Remove an operator branch. When we see a fork, we know it's time to do the removal.
    * @param event the leaf node of which branch to be removed
    */
-  public void removeBranch(AppMasterEventOperator event) {
+  public static void removeBranch(AppMasterEventOperator event) {
     Operator<?> child = event;
     Operator<?> curr = event;
 
@@ -485,4 +470,4 @@ public class GenTezUtils {
 
     curr.removeChild(child);
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
index 6db8220..6b3e19d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
@@ -61,12 +61,8 @@ public class GenTezWork implements NodeProcessor {
 
   static final private Log LOG = LogFactory.getLog(GenTezWork.class.getName());
 
-  // instance of shared utils
-  private GenTezUtils utils = null;
+  private final GenTezUtils utils;
 
-  /**
-   * Constructor takes utils as parameter to facilitate testing
-   */
   public GenTezWork(GenTezUtils utils) {
     this.utils = utils;
   }
@@ -130,7 +126,7 @@ public class GenTezWork implements NodeProcessor {
       if (context.preceedingWork == null) {
         work = utils.createMapWork(context, root, tezWork, null);
       } else {
-        work = utils.createReduceWork(context, root, tezWork);
+        work = GenTezUtils.createReduceWork(context, root, tezWork);
       }
       context.rootToWorkMap.put(root, work);
     }
@@ -295,7 +291,7 @@ public class GenTezWork implements NodeProcessor {
           // if unionWork is null, it means it is the first time. we need to
           // create a union work object and add this work to it. Subsequent 
           // work should reference the union and not the actual work.
-          unionWork = utils.createUnionWork(context, root, operator, tezWork);
+          unionWork = GenTezUtils.createUnionWork(context, root, operator, tezWork);
           // finally connect the union work with work
           connectUnionWorkWithWork(unionWork, work, tezWork, context);
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index f20393a..9503fa8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -178,7 +178,7 @@ public class TezCompiler extends TaskCompiler {
       return;
     }
 
-    GenTezUtils.getUtils().removeBranch(victim);
+    GenTezUtils.removeBranch(victim);
     // at this point we've found the fork in the op pipeline that has the pruning as a child plan.
     LOG.info("Disabling dynamic pruning for: "
         + ((DynamicPruningEventDesc) victim.getConf()).getTableScan().toString()
@@ -319,10 +319,10 @@ public class TezCompiler extends TaskCompiler {
       List<Task<MoveWork>> mvTask, Set<ReadEntity> inputs, Set<WriteEntity> outputs)
       throws SemanticException {
 
-    GenTezUtils.getUtils().resetSequenceNumber();
 
     ParseContext tempParseContext = getParseContext(pCtx, rootTasks);
-    GenTezWork genTezWork = new GenTezWork(GenTezUtils.getUtils());
+    GenTezUtils utils = new GenTezUtils();
+    GenTezWork genTezWork = new GenTezWork(utils);
 
     GenTezProcContext procCtx = new GenTezProcContext(
         conf, tempParseContext, mvTask, rootTasks, inputs, outputs);
@@ -351,7 +351,7 @@ public class TezCompiler extends TaskCompiler {
 
     opRules.put(new RuleRegExp("Handle Potential Analyze Command",
         TableScanOperator.getOperatorName() + "%"),
-        new ProcessAnalyzeTable(GenTezUtils.getUtils()));
+        new ProcessAnalyzeTable(utils));
 
     opRules.put(new RuleRegExp("Remember union",
         UnionOperator.getOperatorName() + "%"),
@@ -371,19 +371,19 @@ public class TezCompiler extends TaskCompiler {
 
     // we need to clone some operator plans and remove union operators still
     for (BaseWork w: procCtx.workWithUnionOperators) {
-      GenTezUtils.getUtils().removeUnionOperators(conf, procCtx, w);
+      GenTezUtils.removeUnionOperators(conf, procCtx, w);
     }
 
     // then we make sure the file sink operators are set up right
     for (FileSinkOperator fileSink: procCtx.fileSinkSet) {
-      GenTezUtils.getUtils().processFileSink(procCtx, fileSink);
+      GenTezUtils.processFileSink(procCtx, fileSink);
     }
 
     // and finally we hook up any events that need to be sent to the tez AM
     LOG.debug("There are " + procCtx.eventOperatorSet.size() + " app master events.");
     for (AppMasterEventOperator event : procCtx.eventOperatorSet) {
       LOG.debug("Handling AppMasterEventOperator: " + event);
-      GenTezUtils.getUtils().processAppMasterEvent(procCtx, event);
+      GenTezUtils.processAppMasterEvent(procCtx, event);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 37d856c..0bc9a46 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -38,6 +38,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
@@ -108,6 +109,9 @@ public class SessionState {
 
   protected ClassLoader parentLoader;
 
+  // Session-scope compile lock.
+  private final ReentrantLock compileLock = new ReentrantLock();
+
   /**
    * current configuration.
    */
@@ -319,6 +323,10 @@ public class SessionState {
     this.isSilent = isSilent;
   }
 
+  public ReentrantLock getCompileLock() {
+    return compileLock;
+  }
+
   public boolean getIsVerbose() {
     return isVerbose;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 9a20799..a600309 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -23,6 +23,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -76,17 +77,26 @@ import org.apache.hive.service.server.ThreadWithGarbageCleanup;
  *
  */
 public class HiveSessionImpl implements HiveSession {
+  private static final String FETCH_WORK_SERDE_CLASS =
+      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
+  private static final Log LOG = LogFactory.getLog(HiveSessionImpl.class);
+
+  // Shared between threads (including SessionState!)
   private final SessionHandle sessionHandle;
   private String username;
   private final String password;
-  private HiveConf hiveConf;
+  private final HiveConf hiveConf;
+  // TODO: some SessionState internals are not thread safe. The compile-time internals are synced
+  //       via session-scope or global compile lock. The run-time internals work by magic!
+  //       They probably work because races are relatively unlikely and few tools run parallel
+  //       queries from the same session.
+  //       1) OperationState should be refactored out of SessionState, and made thread-local.
+  //       2) Some parts of session state, like mrStats and vars, need proper synchronization.
   private SessionState sessionState;
   private String ipAddress;
-  private static final String FETCH_WORK_SERDE_CLASS =
-      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
-  private static final Log LOG = LogFactory.getLog(HiveSessionImpl.class);
   private SessionManager sessionManager;
   private OperationManager operationManager;
+  // Synchronized by locking on itself.
   private final Set<OperationHandle> opHandleSet = new HashSet<OperationHandle>();
   private boolean isOperationLogEnabled;
   private File sessionLogDir;
@@ -393,7 +403,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
      // Referring to SQLOperation.java, there is no chance that a HiveSQLException throws and the asyn
@@ -416,7 +426,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -436,7 +446,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -457,7 +467,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -479,7 +489,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -499,7 +509,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -524,7 +534,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -534,6 +544,12 @@ public class HiveSessionImpl implements HiveSession {
     }
   }
 
+  private void addOpHandle(OperationHandle opHandle) {
+    synchronized (opHandleSet) {
+      opHandleSet.add(opHandle);
+    }
+  }
+
   @Override
   public OperationHandle getFunctions(String catalogName, String schemaName, String functionName)
       throws HiveSQLException {
@@ -545,7 +561,7 @@ public class HiveSessionImpl implements HiveSession {
     OperationHandle opHandle = operation.getHandle();
     try {
       operation.run();
-      opHandleSet.add(opHandle);
+      addOpHandle(opHandle);
       return opHandle;
     } catch (HiveSQLException e) {
       operationManager.closeOperation(opHandle);
@@ -560,10 +576,14 @@ public class HiveSessionImpl implements HiveSession {
     try {
       acquire(true);
       // Iterate through the opHandles and close their operations
-      for (OperationHandle opHandle : opHandleSet) {
+      List<OperationHandle> ops = null;
+      synchronized (opHandleSet) {
+        ops = new ArrayList<>(opHandleSet);
+        opHandleSet.clear();
+      }
+      for (OperationHandle opHandle : ops) {
         operationManager.closeOperation(opHandle);
       }
-      opHandleSet.clear();
       // Cleanup session log directory.
       cleanupSessionLogDir();
       HiveHistory hiveHist = sessionState.getHiveHistory();
@@ -630,7 +650,10 @@ public class HiveSessionImpl implements HiveSession {
 
   @Override
   public void closeExpiredOperations() {
-    OperationHandle[] handles = opHandleSet.toArray(new OperationHandle[opHandleSet.size()]);
+    OperationHandle[] handles;
+    synchronized (opHandleSet) {
+      handles = opHandleSet.toArray(new OperationHandle[opHandleSet.size()]);
+    }
     if (handles.length > 0) {
       List<Operation> operations = operationManager.removeExpiredOperations(handles);
       if (!operations.isEmpty()) {
@@ -648,7 +671,9 @@ public class HiveSessionImpl implements HiveSession {
     acquire(false);
     try {
       for (Operation operation : operations) {
-        opHandleSet.remove(operation.getHandle());
+        synchronized (opHandleSet) {
+          opHandleSet.remove(operation.getHandle());
+        }
         try {
           operation.close();
         } catch (Exception e) {
@@ -675,7 +700,9 @@ public class HiveSessionImpl implements HiveSession {
     acquire(true);
     try {
       operationManager.closeOperation(opHandle);
-      opHandleSet.remove(opHandle);
+      synchronized (opHandleSet) {
+        opHandleSet.remove(opHandle);
+      }
     } finally {
       release(true);
     }

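The close() and closeExpiredOperations() hunks above share one idiom: snapshot the handle set while holding its monitor, then do the slow per-operation work outside the lock. A minimal sketch, assuming String handles and a placeholder close() in place of operationManager.closeOperation():

  import java.util.ArrayList;
  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  // Hypothetical illustration of the snapshot-under-lock idiom; not Hive code.
  class HandleRegistryDemo {
    private final Set<String> handles = new HashSet<String>(); // synchronized on itself

    void add(String h) {
      synchronized (handles) {
        handles.add(h);
      }
    }

    void closeAll() {
      List<String> snapshot;
      synchronized (handles) {    // hold the lock only long enough to copy
        snapshot = new ArrayList<String>(handles);
        handles.clear();
      }
      for (String h : snapshot) { // slow work happens outside the lock, so
        close(h);                 // new operations are not blocked behind it
      }
    }

    private void close(String h) {
      // placeholder for operationManager.closeOperation(...)
    }
  }

Collections.synchronizedSet would protect individual calls, but it would not make the copy-and-clear step atomic, which is presumably why the patch synchronizes explicitly.
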
http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
index cd3c3f9..bf808f1 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
@@ -37,7 +37,8 @@ import org.apache.hive.service.cli.thrift.TProtocolVersion;
 /**
  *
  * HiveSessionImplwithUGI.
- * HiveSession with connecting user's UGI and delegation token if required
+ * HiveSession with connecting user's UGI and delegation token if required.
+ * Note: this object may be shared between threads in HS2.
  */
 public class HiveSessionImplwithUGI extends HiveSessionImpl {
   public static final String HS2TOKEN = "HiveServer2ImpersonationToken";

http://git-wip-us.apache.org/repos/asf/hive/blob/be89eac6/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
index b4d517f..c73d152 100644
--- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
@@ -26,9 +26,18 @@ import static org.junit.Assert.fail;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.FutureTask;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.service.server.HiveServer2;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -38,6 +47,7 @@ import org.junit.Test;
  *
  */
 public abstract class CLIServiceTest {
+  private static final Log LOG = LogFactory.getLog(CLIServiceTest.class);
 
   protected static CLIServiceClient client;
 
@@ -206,7 +216,7 @@ public abstract class CLIServiceTest {
         HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS);
     queryString = "SELECT NON_EXISTING_COLUMN FROM " + tableName;
     try {
-      runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.ERROR, longPollingTimeout);
+      runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.ERROR, longPollingTimeout);
     }
     catch (HiveSQLException e) {
       // expected error
@@ -218,7 +228,7 @@ public abstract class CLIServiceTest {
      * Also check that the sqlState and errorCode should be set
      */
     queryString = "CREATE TABLE NON_EXISTING_TAB (ID STRING) location 'invalid://localhost:10000/a/b/c'";
-    opStatus = runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.ERROR, longPollingTimeout);
+    opStatus = runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.ERROR, longPollingTimeout);
     // sqlState, errorCode should be set
     assertEquals(opStatus.getOperationException().getSQLState(), "08S01");
     assertEquals(opStatus.getOperationException().getErrorCode(), 1);
@@ -226,21 +236,21 @@ public abstract class CLIServiceTest {
      * Execute an async query with default config
      */
     queryString = "SELECT ID+1 FROM " + tableName;
-    runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout);
+    runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout);
 
     /**
      * Execute an async query with long polling timeout set to 0
      */
     longPollingTimeout = 0;
     queryString = "SELECT ID+1 FROM " + tableName;
-    runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout);
+    runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout);
 
     /**
      * Execute an async query with long polling timeout set to 500 millis
      */
     longPollingTimeout = 500;
     queryString = "SELECT ID+1 FROM " + tableName;
-    runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout);
+    runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout);
 
     /**
      * Cancellation test
@@ -259,6 +269,92 @@ public abstract class CLIServiceTest {
     client.closeSession(sessionHandle);
   }
 
+
+  private void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cdlOut) {
+    cdlIn.countDown();
+    try {
+      cdlOut.await();
+    } catch (InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Test
+  public void testExecuteStatementParallel() throws Exception {
+    Map<String, String> confOverlay = new HashMap<String, String>();
+    String tableName = "TEST_EXEC_PARALLEL";
+    String columnDefinitions = "(ID STRING)";
+
+    // Open a session and set up the test data
+    SessionHandle sessionHandle = setupTestData(tableName, columnDefinitions, confOverlay);
+    assertNotNull(sessionHandle);
+
+    long longPollingTimeout = HiveConf.getTimeVar(new HiveConf(),
+        HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS);
+    confOverlay.put(
+        HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT.varname, longPollingTimeout + "ms");
+
+    int THREAD_COUNT = 10, QUERY_COUNT = 10;
+    // TODO: refactor this into a utility; LLAP tests use this pattern a lot
+    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
+    CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
+    @SuppressWarnings("unchecked")
+    Callable<Void>[] cs = (Callable<Void>[])new Callable[3];
+    // Create callables with different queries.
+    String query = "SELECT ID + %1$d FROM " + tableName;
+    cs[0] = createQueryCallable(
+        query, confOverlay, longPollingTimeout, QUERY_COUNT, cdlIn, cdlOut);
+    query = "SELECT t1.ID, SUM(t2.ID) + %1$d FROM  " + tableName + " t1 CROSS JOIN "
+        + tableName + " t2 GROUP BY t1.ID HAVING t1.ID > 1";
+    cs[1] = createQueryCallable(
+        query, confOverlay, longPollingTimeout, QUERY_COUNT, cdlIn, cdlOut);
+    query = "SELECT b.a FROM (SELECT (t1.ID + %1$d) as a , t2.* FROM  " + tableName
+        + " t1 INNER JOIN " + tableName + " t2 ON t1.ID = t2.ID WHERE t2.ID > 2) b";
+    cs[2] = createQueryCallable(
+        query, confOverlay, longPollingTimeout, QUERY_COUNT, cdlIn, cdlOut);
+
+    @SuppressWarnings("unchecked")
+    FutureTask<Void>[] tasks = (FutureTask<Void>[])new FutureTask[THREAD_COUNT];
+    for (int i = 0; i < THREAD_COUNT; ++i) {
+      tasks[i] = new FutureTask<Void>(cs[i % cs.length]);
+      executor.execute(tasks[i]);
+    }
+    try {
+      cdlIn.await(); // Wait for all threads to be ready.
+      cdlOut.countDown(); // Release them at the same time.
+      for (int i = 0; i < THREAD_COUNT; ++i) {
+        tasks[i].get();
+      }
+    } catch (Throwable t) {
+      throw new RuntimeException(t);
+    }
+
+    // Cleanup
+    client.executeStatement(sessionHandle, "DROP TABLE " + tableName, confOverlay);
+    client.closeSession(sessionHandle);
+  }
+
+  private Callable<Void> createQueryCallable(final String queryStringFormat,
+      final Map<String, String> confOverlay, final long longPollingTimeout,
+      final int queryCount, final CountDownLatch cdlIn, final CountDownLatch cdlOut) {
+    return new Callable<Void>() {
+      public Void call() throws Exception {
+        syncThreadStart(cdlIn, cdlOut);
+        SessionHandle sessionHandle = openSession(confOverlay);
+        OperationHandle[] hs = new OperationHandle[queryCount];
+        for (int i = 0; i < hs.length; ++i) {
+          String queryString = String.format(queryStringFormat, i);
+          LOG.info("Submitting " + i);
+          hs[i] = client.executeStatementAsync(sessionHandle, queryString, confOverlay);
+        }
+        for (int i = hs.length - 1; i >= 0; --i) {
+          waitForAsyncQuery(hs[i], OperationState.FINISHED, longPollingTimeout);
+        }
+        return null;
+      }
+    };
+  }
+
   /**
    * Sets up a test specific table with the given column definitions and config
    * @param tableName
@@ -268,13 +364,27 @@ public abstract class CLIServiceTest {
    */
   private SessionHandle setupTestData(String tableName, String columnDefinitions,
       Map<String, String> confOverlay) throws Exception {
+    SessionHandle sessionHandle = openSession(confOverlay);
+    createTestTable(tableName, columnDefinitions, confOverlay, sessionHandle);
+    return sessionHandle;
+  }
+
+  private SessionHandle openSession(Map<String, String> confOverlay)
+      throws HiveSQLException {
     SessionHandle sessionHandle = client.openSession("tom", "password", confOverlay);
     assertNotNull(sessionHandle);
+    SessionState.get().setIsHiveServerQuery(true); // Pretend we are in HS2.
 
     String queryString = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname
         + " = false";
     client.executeStatement(sessionHandle, queryString, confOverlay);
+    return sessionHandle;
+  }
 
+  private void createTestTable(String tableName, String columnDefinitions,
+      Map<String, String> confOverlay, SessionHandle sessionHandle)
+      throws HiveSQLException {
+    String queryString;
     // Drop the table if it exists
     queryString = "DROP TABLE IF EXISTS " + tableName;
     client.executeStatement(sessionHandle, queryString, confOverlay);
@@ -282,22 +392,27 @@ public abstract class CLIServiceTest {
     // Create a test table
     queryString = "CREATE TABLE " + tableName + columnDefinitions;
     client.executeStatement(sessionHandle, queryString, confOverlay);
-
-    return sessionHandle;
   }
 
-  private OperationStatus runQueryAsync(SessionHandle sessionHandle, String queryString,
+  private OperationStatus runAsyncAndWait(SessionHandle sessionHandle, String queryString,
       Map<String, String> confOverlay, OperationState expectedState,
       long longPollingTimeout) throws HiveSQLException {
     // Timeout for the iteration in case of asynchronous execute
+    confOverlay.put(
+        HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT.varname, longPollingTimeout + "ms");
+    OperationHandle h = client.executeStatementAsync(sessionHandle, queryString, confOverlay);
+    return waitForAsyncQuery(h, expectedState, longPollingTimeout);
+  }
+
+
+  private OperationStatus waitForAsyncQuery(OperationHandle opHandle,
+      OperationState expectedState, long longPollingTimeout) throws HiveSQLException {
     long testIterationTimeout = System.currentTimeMillis() + 100000;
     long longPollingStart;
     long longPollingEnd;
     long longPollingTimeDelta;
     OperationStatus opStatus = null;
     OperationState state = null;
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT.varname, longPollingTimeout + "ms");
-    OperationHandle opHandle = client.executeStatementAsync(sessionHandle, queryString, confOverlay);
     int count = 0;
     while (true) {
       // Break if iteration times out


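The syncThreadStart() helper in the test above is a standard two-latch start gate: each worker checks in on one latch and then blocks on a second until the main thread releases them all at once, maximizing contention on the compile lock. A standalone sketch, with StartGateDemo as a hypothetical class rather than the actual test code:

  import java.util.concurrent.CountDownLatch;

  // Toy version of the start gate used by testExecuteStatementParallel.
  class StartGateDemo {
    public static void main(String[] args) throws InterruptedException {
      final int threads = 4;
      final CountDownLatch ready = new CountDownLatch(threads); // workers check in
      final CountDownLatch go = new CountDownLatch(1);          // main releases all
      for (int i = 0; i < threads; ++i) {
        final int id = i;
        new Thread(new Runnable() {
          public void run() {
            ready.countDown();  // signal "I'm ready"
            try {
              go.await();       // block until every worker has checked in
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
            System.out.println("worker " + id + " started");
          }
        }).start();
      }
      ready.await();  // wait for all workers to check in
      go.countDown(); // release them simultaneously
    }
  }
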
[49/50] [abbrv] hive git commit: HIVE-11136 - Unused Logger in org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount (Yongzhi Chen, reviewed by Chao Sun)

Posted by xu...@apache.org.
HIVE-11136 - Unused Logger in org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount (Yongzhi Chen, reviewed by Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e61a1a94
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e61a1a94
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e61a1a94

Branch: refs/heads/beeline-cli
Commit: e61a1a9412ea15da72fb7645112f7f7572688ece
Parents: d6ec52e
Author: Yongzhi Chen <yo...@hotmail.com>
Authored: Wed Jul 15 18:27:55 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Wed Jul 15 18:27:55 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java      | 5 -----
 .../hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java       | 5 -----
 .../org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java | 4 ----
 .../hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java       | 4 ----
 .../apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java  | 4 ----
 .../apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java | 4 ----
 6 files changed, 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e61a1a94/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java
index b10c4ab..156d19b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectList.java
@@ -18,20 +18,15 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMkCollectionEvaluator.BufferType;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 @Description(name = "collect_list", value = "_FUNC_(x) - Returns a list of objects with duplicates")
 public class GenericUDAFCollectList extends AbstractGenericUDAFResolver {
 
-  static final Log LOG = LogFactory.getLog(GenericUDAFCollectList.class.getName());
-
   public GenericUDAFCollectList() {
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e61a1a94/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java
index 312a698..0c2cf90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java
@@ -17,13 +17,10 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMkCollectionEvaluator.BufferType;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 /**
@@ -32,8 +29,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 @Description(name = "collect_set", value = "_FUNC_(x) - Returns a set of objects with duplicate elements eliminated")
 public class GenericUDAFCollectSet extends AbstractGenericUDAFResolver {
 
-  static final Log LOG = LogFactory.getLog(GenericUDAFCollectSet.class.getName());
-
   public GenericUDAFCollectSet() {
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e61a1a94/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
index d47e7f9..eaf112e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -44,8 +42,6 @@ import org.apache.hadoop.io.LongWritable;
           +        "which the supplied expression(s) are unique and non-NULL.")
 public class GenericUDAFCount implements GenericUDAFResolver2 {
 
-  private static final Log LOG = LogFactory.getLog(GenericUDAFCount.class.getName());
-
   @Override
   public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
       throws SemanticException {

http://git-wip-us.apache.org/repos/asf/hive/blob/e61a1a94/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
index f1017be..8221c1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hive.ql.udf.generic;
 
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -64,8 +62,6 @@ import org.apache.hadoop.io.LongWritable;
         + "where neither x nor y is null.")
 public class GenericUDAFCovariance extends AbstractGenericUDAFResolver {
 
-  static final Log LOG = LogFactory.getLog(GenericUDAFCovariance.class.getName());
-
   @Override
   public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException {
     if (parameters.length != 2) {

http://git-wip-us.apache.org/repos/asf/hive/blob/e61a1a94/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java
index bc93204..611966c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.hive.ql.udf.generic;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.WindowFunctionDescription;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -49,8 +47,6 @@ import org.apache.hadoop.io.IntWritable;
 )
 public class GenericUDAFCumeDist extends GenericUDAFRank {
 
-  static final Log LOG = LogFactory.getLog(GenericUDAFCumeDist.class.getName());
-
   @Override
   protected GenericUDAFAbstractRankEvaluator createEvaluator() {
     return new GenericUDAFCumeDistEvaluator();

http://git-wip-us.apache.org/repos/asf/hive/blob/e61a1a94/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java
index 50ee4ef..715454b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.udf.generic;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.WindowFunctionDescription;
 
@@ -39,8 +37,6 @@ import org.apache.hadoop.hive.ql.exec.WindowFunctionDescription;
 )
 public class GenericUDAFDenseRank extends GenericUDAFRank {
 
-  static final Log LOG = LogFactory.getLog(GenericUDAFDenseRank.class.getName());
-
   @Override
   protected GenericUDAFAbstractRankEvaluator createEvaluator() {
     return new GenericUDAFDenseRankEvaluator();


[45/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/describe_syntax.q.out b/ql/src/test/results/clientpositive/describe_syntax.q.out
index cb6f40e..23ca546 100644
--- a/ql/src/test/results/clientpositive/describe_syntax.q.out
+++ b/ql/src/test/results/clientpositive/describe_syntax.q.out
@@ -95,7 +95,6 @@ part                	string
 # Detailed Table Information	 	 
 Database:           	db1                 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -168,7 +167,6 @@ part                	string
 # Detailed Table Information	 	 
 Database:           	db1                 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -322,8 +320,6 @@ Partition Value:    	[4, 5]
 Database:           	db1                 	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 #### A masked pattern was here ####
 	 	 
@@ -395,8 +391,6 @@ Partition Value:    	[4, 5]
 Database:           	db1                 	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/describe_table.q.out b/ql/src/test/results/clientpositive/describe_table.q.out
index 68003cc..fc06a1f 100644
--- a/ql/src/test/results/clientpositive/describe_table.q.out
+++ b/ql/src/test/results/clientpositive/describe_table.q.out
@@ -182,7 +182,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -230,8 +229,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	srcpart             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -270,7 +267,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -318,8 +314,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	srcpart             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -398,7 +392,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
index 4c8ddd3..216a79c 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
@@ -889,8 +889,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part_orc     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -933,8 +931,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_orc     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -977,8 +973,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part_limit_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -1021,8 +1015,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_limit_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -1064,8 +1056,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1107,8 +1097,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1150,8 +1138,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1193,8 +1179,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1711,8 +1695,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1755,8 +1737,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1860,8 +1840,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1904,8 +1882,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2_orc    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2154,8 +2130,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2197,8 +2171,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2368,8 +2340,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2411,8 +2381,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2_orc	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 9e947bb..41049bd 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -794,8 +794,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -838,8 +836,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -882,8 +878,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part_limit   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -926,8 +920,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_limit   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -969,8 +961,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1012,8 +1002,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1055,8 +1043,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1098,8 +1084,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
@@ -1611,8 +1595,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1655,8 +1637,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1760,8 +1740,6 @@ Partition Value:    	[foo, 27]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1804,8 +1782,6 @@ Partition Value:    	[foo, __HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2052,8 +2028,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2095,8 +2069,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2199,8 +2171,6 @@ Partition Value:    	[27]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2242,8 +2212,6 @@ Partition Value:    	[__HIVE_DEFAULT_PARTITION__]
 Database:           	default             	 
 Table:              	over1k_part_buck_sort2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
index 4dda248..cb0eb58 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
@@ -187,8 +187,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -249,8 +247,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -408,8 +404,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -470,8 +464,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -650,8 +642,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -712,8 +702,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -870,8 +858,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -932,8 +918,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1168,8 +1152,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1230,8 +1212,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1389,8 +1369,6 @@ Partition Value:    	[2452617]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1451,8 +1429,6 @@ Partition Value:    	[2452638]
 Database:           	default             	 
 Table:              	ss_part_orc         	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/encrypted/encryption_insert_values.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_values.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_values.q.out
index 888a612..c1cbf30 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_values.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_values.q.out
@@ -54,7 +54,6 @@ tmp_values_col2     	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/exim_hidden_files.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exim_hidden_files.q.out b/ql/src/test/results/clientpositive/exim_hidden_files.q.out
index e449e0e..8076148 100644
--- a/ql/src/test/results/clientpositive/exim_hidden_files.q.out
+++ b/ql/src/test/results/clientpositive/exim_hidden_files.q.out
@@ -69,7 +69,6 @@ emp_country         	string
 # Detailed Table Information	 	 
 Database:           	importer            	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/index_skewtable.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_skewtable.q.out b/ql/src/test/results/clientpositive/index_skewtable.q.out
index 348c742..10a441d 100644
--- a/ql/src/test/results/clientpositive/index_skewtable.q.out
+++ b/ql/src/test/results/clientpositive/index_skewtable.q.out
@@ -44,7 +44,6 @@ _offsets            	array<bigint>
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	INDEX_TABLE         	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
index 3566fa0..5d69e2f 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
@@ -47,8 +47,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -102,8 +100,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -157,8 +153,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -212,8 +206,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -267,8 +259,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -322,8 +312,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -377,8 +365,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -432,8 +418,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -487,8 +471,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -542,8 +524,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -597,8 +577,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -652,8 +630,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -707,8 +683,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -762,8 +736,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -817,8 +789,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -872,8 +842,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -927,8 +895,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -982,8 +948,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1037,8 +1001,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1094,8 +1056,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1151,8 +1111,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1206,8 +1164,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1261,8 +1217,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1316,8 +1270,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -1371,8 +1323,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
index e0b5866..9503897 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_bucketed 	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
index 69df6d0..d1a6789 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -110,8 +108,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index 773a2a8..c2f0810 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -69,8 +69,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -111,8 +109,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -182,8 +178,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -224,8 +218,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -295,8 +287,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -337,8 +327,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -611,8 +599,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -653,8 +639,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
index 413e7b3..a621be8 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
@@ -129,8 +129,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -190,8 +188,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out_2    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -317,8 +313,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -378,8 +372,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out_2    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -505,8 +497,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -566,8 +556,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out_2    	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out
index 4879cc4..40c2bf3 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out
@@ -59,8 +59,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	list_bucketing_table	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -135,8 +133,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	list_bucketing_table2	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
index 703e52c..6aef463 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
@@ -185,8 +185,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -348,8 +346,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -503,8 +499,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -631,8 +625,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table_out      	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
index de62198..4ed6d23 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -104,8 +102,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
index c7332ea..523c105 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
@@ -57,8 +57,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -98,8 +96,6 @@ Partition Value:    	[2]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -161,8 +157,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -202,8 +196,6 @@ Partition Value:    	[2]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -265,8 +257,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -306,8 +296,6 @@ Partition Value:    	[2]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -369,8 +357,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -410,8 +396,6 @@ Partition Value:    	[2]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
index 9a3ec57..ce31548 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
@@ -148,8 +148,6 @@ Partition Value:    	[2008-04-08, 0]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -190,8 +188,6 @@ Partition Value:    	[2008-04-08, 1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
index d5910ee..7b002f5 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
@@ -49,8 +49,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -104,8 +102,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -159,8 +155,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -214,8 +208,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -269,8 +261,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	16                  
@@ -326,8 +316,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_table          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/lb_fs_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lb_fs_stats.q.out b/ql/src/test/results/clientpositive/lb_fs_stats.q.out
index 9461714..e09c406 100644
--- a/ql/src/test/results/clientpositive/lb_fs_stats.q.out
+++ b/ql/src/test/results/clientpositive/lb_fs_stats.q.out
@@ -53,8 +53,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	test_tab            	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
index 7cce327..067ac5d 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
@@ -282,8 +282,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -329,8 +327,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
index cfaadd8..1bd4755 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
@@ -364,8 +364,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
index f872301..d1b9598 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
@@ -364,8 +364,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
index e7b5540..8cc1370 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
@@ -238,8 +238,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
index c7eeb8b..00a6235 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
@@ -238,8 +238,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
index 5b5a35a..b0735a3 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
@@ -248,8 +248,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_mul_col	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
index 677cc7d..6d2298b 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
@@ -248,8 +248,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_mul_col	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
index dc07f10..6761092 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
@@ -248,8 +248,6 @@ Partition Value:    	[2008-04-08, 2013-01-23+18:00:99]
 Database:           	default             	 
 Table:              	list_bucketing_mul_col	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
index 71ffd0e..f7a1039 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
@@ -248,8 +248,6 @@ Partition Value:    	[2008-04-08, 2013-01-23+18:00:99]
 Database:           	default             	 
 Table:              	list_bucketing_mul_col	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
index 27dcb9e..1519b96 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
@@ -190,7 +190,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
index c52c7d8..59bb498 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
@@ -312,8 +312,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
index 685511f..aeeba03 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
@@ -312,8 +312,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
index 0317ad2..fad6cb9 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
@@ -271,8 +271,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
index 976fe63..23d6896 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
@@ -320,8 +320,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -723,8 +721,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
index a79d478..088639b 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
@@ -320,8 +320,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -723,8 +721,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
index c91fd5f..ce8bb4a 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
@@ -286,8 +286,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -333,8 +331,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
index 831b337..1c33382 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
@@ -286,8 +286,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -333,8 +331,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
index 883f086..d223234 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
@@ -380,8 +380,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -425,8 +423,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -848,8 +844,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -893,8 +887,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
index e968402..f884ace 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
@@ -380,8 +380,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -425,8 +423,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -848,8 +844,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -893,8 +887,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
index 2a8efc4..541944d 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
@@ -326,8 +326,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -371,8 +369,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -794,8 +790,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -839,8 +833,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
index a9522e0..fc22118 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
@@ -382,8 +382,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -427,8 +425,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -484,8 +480,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
index 41979fc..9947c1a 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
@@ -382,8 +382,6 @@ Partition Value:    	[2008-04-08, a1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -427,8 +425,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -484,8 +480,6 @@ Partition Value:    	[2008-04-08, b1]
 Database:           	default             	 
 Table:              	list_bucketing_dynamic_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
index e62d44a..8975ec0 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
@@ -320,8 +320,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -723,8 +721,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
index 0178e64..a9bf61a 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
@@ -320,8 +320,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	6                   
@@ -723,8 +721,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	list_bucketing_static_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
index b6c5b4c..7c4b70c 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
@@ -74,8 +74,6 @@ Partition Value:    	[1, 4]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
index 6d4e165..7f32108 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
@@ -74,8 +74,6 @@ Partition Value:    	[1, 4]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
index 3c0f5e8..753729f 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
@@ -74,8 +74,6 @@ Partition Value:    	[1, 1]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -138,8 +136,6 @@ Partition Value:    	[1, 2]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -207,8 +203,6 @@ Partition Value:    	[1, 3]
 Database:           	default             	 
 Table:              	fact_daily          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   


[13/50] [abbrv] hive git commit: HIVE-11131: Get row information on DataWritableWriter once for better writing performance (Sergio Pena, reviewed by Ferdinand Xu, Dong Chen & Ryan Blue)

Posted by xu...@apache.org.
HIVE-11131: Get row information on DataWritableWriter once for better writing performance (Sergio Pena, reviewed by Ferdinand Xu, Dong Chen & Ryan Blue)
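
For readers skimming the diff below: the refactor replaces a per-value type switch in writeValue()/writePrimitive() with a tree of type-specific DataWriter objects that is built once, from the first record's ObjectInspector, and then reused for every subsequent record. The following is a minimal, self-contained sketch of that pattern only; ValueWriter, FieldType, and createWriter are invented stand-in names, not the actual classes in the diff.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class OneTimeDispatchSketch {
      // Stand-in for the DataWriter interface introduced in the diff.
      interface ValueWriter { void write(Object value, StringBuilder out); }

      // Stand-in for the type information an ObjectInspector provides.
      enum FieldType { INT, STRING }

      // The type switch runs exactly once per column, when the writer
      // tree is built -- not once per value.
      static ValueWriter createWriter(FieldType type) {
        switch (type) {
          case INT:    return (v, out) -> out.append(((Integer) v).intValue()).append(';');
          case STRING: return (v, out) -> out.append((String) v).append(';');
          default:     throw new IllegalArgumentException("Unsupported type: " + type);
        }
      }

      public static void main(String[] args) {
        List<FieldType> schema = Arrays.asList(FieldType.INT, FieldType.STRING);

        // Built once before the row loop -- the analogue of messageWriter
        // being created lazily on the first call to write().
        List<ValueWriter> writers = new ArrayList<>();
        for (FieldType t : schema) {
          writers.add(createWriter(t));
        }

        StringBuilder out = new StringBuilder();
        Object[][] rows = { { 1, "a" }, { 2, "b" } };
        for (Object[] row : rows) {
          for (int i = 0; i < row.length; i++) {
            writers.get(i).write(row[i], out);  // no per-value type dispatch here
          }
        }
        System.out.println(out);  // prints: 1;a;2;b;
      }
    }

The saving is that type dispatch is paid once per schema rather than once per value, which is what the commit title means by getting the row information once.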


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/82074894
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/82074894
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/82074894

Branch: refs/heads/beeline-cli
Commit: 8207489451debe98051898df6b1ec0833fc80bb3
Parents: a2dabcb
Author: Sergio Pena <se...@cloudera.com>
Authored: Thu Jul 9 16:37:10 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu Jul 9 16:37:10 2015 -0500

----------------------------------------------------------------------
 .../ql/io/parquet/write/DataWritableWriter.java | 638 +++++++++++++------
 1 file changed, 426 insertions(+), 212 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/82074894/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
index c195c3e..493cd36 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
@@ -20,8 +20,26 @@ import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.io.ParquetHiveRecord;
-import org.apache.hadoop.hive.serde2.objectinspector.*;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveCharObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.parquet.io.api.Binary;
 import org.apache.parquet.io.api.RecordConsumer;
@@ -36,87 +54,104 @@ import java.util.Map;
 
 /**
  *
- * DataWritableWriter is a writer that reads a ParquetWritable object and send the data to the Parquet
- * API with the expected schema. This class is only used through DataWritableWriteSupport class.
+ * DataWritableWriter sends a record to the Parquet API with the expected schema so that
+ * the record can be written to a file.
+ * This class is only used through DataWritableWriteSupport class.
  */
 public class DataWritableWriter {
   private static final Log LOG = LogFactory.getLog(DataWritableWriter.class);
-  private final RecordConsumer recordConsumer;
+  protected final RecordConsumer recordConsumer;
   private final GroupType schema;
 
+  /* This writer will be created when writing the first row in order to get
+  information about how to inspect the record data.  */
+  private DataWriter messageWriter;
+
   public DataWritableWriter(final RecordConsumer recordConsumer, final GroupType schema) {
     this.recordConsumer = recordConsumer;
     this.schema = schema;
   }
 
   /**
-   * It writes all record values to the Parquet RecordConsumer.
-   * @param record Contains the record that are going to be written.
+   * It writes a record to Parquet.
+   * @param record Contains the record that is going to be written.
    */
   public void write(final ParquetHiveRecord record) {
     if (record != null) {
-      recordConsumer.startMessage();
-      try {
-        writeGroupFields(record.getObject(), record.getObjectInspector(), schema);
-      } catch (RuntimeException e) {
-        String errorMessage = "Parquet record is malformed: " + e.getMessage();
-        LOG.error(errorMessage, e);
-        throw new RuntimeException(errorMessage, e);
+      if (messageWriter == null) {
+        try {
+          messageWriter = createMessageWriter(record.getObjectInspector(), schema);
+        } catch (RuntimeException e) {
+          String errorMessage = "Parquet record is malformed: " + e.getMessage();
+          LOG.error(errorMessage, e);
+          throw new RuntimeException(errorMessage, e);
+        }
       }
-      recordConsumer.endMessage();
+
+      messageWriter.write(record.getObject());
     }
   }
 
-  /**
-   * It writes all the fields contained inside a group to the RecordConsumer.
-   * @param value The list of values contained in the group.
-   * @param inspector The object inspector used to get the correct value type.
-   * @param type Type that contains information about the group schema.
-   */
-  private void writeGroupFields(final Object value, final StructObjectInspector inspector, final GroupType type) {
-    if (value != null) {
-      List<? extends StructField> fields = inspector.getAllStructFieldRefs();
-      List<Object> fieldValuesList = inspector.getStructFieldsDataAsList(value);
-
-      for (int i = 0; i < type.getFieldCount(); i++) {
-        Type fieldType = type.getType(i);
-        String fieldName = fieldType.getName();
-        Object fieldValue = fieldValuesList.get(i);
-
-        if (fieldValue != null) {
-          ObjectInspector fieldInspector = fields.get(i).getFieldObjectInspector();
-          recordConsumer.startField(fieldName, i);
-          writeValue(fieldValue, fieldInspector, fieldType);
-          recordConsumer.endField(fieldName, i);
-        }
-      }
-    }
+  private MessageDataWriter createMessageWriter(StructObjectInspector inspector, GroupType schema) {
+    return new MessageDataWriter(inspector, schema);
   }
 
   /**
-   * It writes the field value to the Parquet RecordConsumer. It detects the field type, and calls
-   * the correct write function.
-   * @param value The writable object that contains the value.
+   * Creates a writer for the specific object inspector. The returned writer will be used
+   * to call the Parquet API for the specific data type.
    * @param inspector The object inspector used to get the correct value type.
    * @param type Type that contains information about the type schema.
+   * @return A DataWriter object used to call the Parquet API for the specific data type.
    */
-  private void writeValue(final Object value, final ObjectInspector inspector, final Type type) {
+  private DataWriter createWriter(ObjectInspector inspector, Type type) {
     if (type.isPrimitive()) {
       checkInspectorCategory(inspector, ObjectInspector.Category.PRIMITIVE);
-      writePrimitive(value, (PrimitiveObjectInspector)inspector);
+      PrimitiveObjectInspector primitiveInspector = (PrimitiveObjectInspector)inspector;
+      switch (primitiveInspector.getPrimitiveCategory()) {
+        case BOOLEAN:
+          return new BooleanDataWriter((BooleanObjectInspector)inspector);
+        case BYTE:
+          return new ByteDataWriter((ByteObjectInspector)inspector);
+        case SHORT:
+          return new ShortDataWriter((ShortObjectInspector)inspector);
+        case INT:
+          return new IntDataWriter((IntObjectInspector)inspector);
+        case LONG:
+          return new LongDataWriter((LongObjectInspector)inspector);
+        case FLOAT:
+          return new FloatDataWriter((FloatObjectInspector)inspector);
+        case DOUBLE:
+          return new DoubleDataWriter((DoubleObjectInspector)inspector);
+        case STRING:
+          return new StringDataWriter((StringObjectInspector)inspector);
+        case CHAR:
+          return new CharDataWriter((HiveCharObjectInspector)inspector);
+        case VARCHAR:
+          return new VarcharDataWriter((HiveVarcharObjectInspector)inspector);
+        case BINARY:
+          return new BinaryDataWriter((BinaryObjectInspector)inspector);
+        case TIMESTAMP:
+          return new TimestampDataWriter((TimestampObjectInspector)inspector);
+        case DECIMAL:
+          return new DecimalDataWriter((HiveDecimalObjectInspector)inspector);
+        case DATE:
+          return new DateDataWriter((DateObjectInspector)inspector);
+        default:
+          throw new IllegalArgumentException("Unsupported primitive data type: " + primitiveInspector.getPrimitiveCategory());
+      }
     } else {
       GroupType groupType = type.asGroupType();
       OriginalType originalType = type.getOriginalType();
 
       if (originalType != null && originalType.equals(OriginalType.LIST)) {
         checkInspectorCategory(inspector, ObjectInspector.Category.LIST);
-        writeArray(value, (ListObjectInspector)inspector, groupType);
+        return new ListDataWriter((ListObjectInspector)inspector, groupType);
       } else if (originalType != null && originalType.equals(OriginalType.MAP)) {
         checkInspectorCategory(inspector, ObjectInspector.Category.MAP);
-        writeMap(value, (MapObjectInspector)inspector, groupType);
+        return new MapDataWriter((MapObjectInspector)inspector, groupType);
       } else {
         checkInspectorCategory(inspector, ObjectInspector.Category.STRUCT);
-        writeGroup(value, (StructObjectInspector)inspector, groupType);
+        return new StructDataWriter((StructObjectInspector)inspector, groupType);
       }
     }
   }
@@ -134,203 +169,382 @@ public class DataWritableWriter {
     }
   }
 
-  /**
-   * It writes a group type and all its values to the Parquet RecordConsumer.
-   * This is used only for optional and required groups.
-   * @param value Object that contains the group values.
-   * @param inspector The object inspector used to get the correct value type.
-   * @param type Type that contains information about the group schema.
-   */
-  private void writeGroup(final Object value, final StructObjectInspector inspector, final GroupType type) {
-    recordConsumer.startGroup();
-    writeGroupFields(value, inspector, type);
-    recordConsumer.endGroup();
+  private interface DataWriter {
+    void write(Object value);
   }
 
-  /**
-   * It writes a list type and its array elements to the Parquet RecordConsumer.
-   * This is called when the original type (LIST) is detected by writeValue()/
-   * This function assumes the following schema:
-   *    optional group arrayCol (LIST) {
-   *      repeated group array {
-   *        optional TYPE array_element;
-   *      }
-   *    }
-   * @param value The object that contains the array values.
-   * @param inspector The object inspector used to get the correct value type.
-   * @param type Type that contains information about the group (LIST) schema.
-   */
-  private void writeArray(final Object value, final ListObjectInspector inspector, final GroupType type) {
-    // Get the internal array structure
-    GroupType repeatedType = type.getType(0).asGroupType();
+  private class GroupDataWriter implements DataWriter {
+    private StructObjectInspector inspector;
+    private List<? extends StructField> structFields;
+    private DataWriter[] structWriters;
 
-    recordConsumer.startGroup();
-    recordConsumer.startField(repeatedType.getName(), 0);
+    public GroupDataWriter(StructObjectInspector inspector, GroupType groupType) {
+      this.inspector = inspector;
 
-    List<?> arrayValues = inspector.getList(value);
-    ObjectInspector elementInspector = inspector.getListElementObjectInspector();
+      structFields = this.inspector.getAllStructFieldRefs();
+      structWriters = new DataWriter[structFields.size()];
 
-    Type elementType = repeatedType.getType(0);
-    String elementName = elementType.getName();
+      for (int i = 0; i < structFields.size(); i++) {
+        StructField field = structFields.get(i);
+        structWriters[i] = createWriter(field.getFieldObjectInspector(), groupType.getType(i));
+      }
+    }
 
-    for (Object element : arrayValues) {
-      recordConsumer.startGroup();
-      if (element != null) {
-        recordConsumer.startField(elementName, 0);
-        writeValue(element, elementInspector, elementType);
-        recordConsumer.endField(elementName, 0);
+    @Override
+    public void write(Object value) {
+      for (int i = 0; i < structFields.size(); i++) {
+        StructField field = structFields.get(i);
+        Object fieldValue = inspector.getStructFieldData(value, field);
+
+        if (fieldValue != null) {
+          String fieldName = field.getFieldName();
+          DataWriter writer = structWriters[i];
+
+          recordConsumer.startField(fieldName, i);
+          writer.write(fieldValue);
+          recordConsumer.endField(fieldName, i);
+        }
       }
-      recordConsumer.endGroup();
+    }
+  }
+
+  private class MessageDataWriter extends GroupDataWriter implements DataWriter {
+    public MessageDataWriter(StructObjectInspector inspector, GroupType groupType) {
+      super(inspector, groupType);
     }
 
-    recordConsumer.endField(repeatedType.getName(), 0);
-    recordConsumer.endGroup();
+    @Override
+    public void write(Object value) {
+      recordConsumer.startMessage();
+      if (value != null) {
+        super.write(value);
+      }
+      recordConsumer.endMessage();
+    }
   }
 
-  /**
-   * It writes a map type and its key-pair values to the Parquet RecordConsumer.
-   * This is called when the original type (MAP) is detected by writeValue().
-   * This function assumes the following schema:
-   *    optional group mapCol (MAP) {
-   *      repeated group map (MAP_KEY_VALUE) {
-   *        required TYPE key;
-   *        optional TYPE value;
-   *      }
-   *    }
-   * @param value The object that contains the map key-values.
-   * @param inspector The object inspector used to get the correct value type.
-   * @param type Type that contains information about the group (MAP) schema.
-   */
-  private void writeMap(final Object value, final MapObjectInspector inspector, final GroupType type) {
-    // Get the internal map structure (MAP_KEY_VALUE)
-    GroupType repeatedType = type.getType(0).asGroupType();
+  private class StructDataWriter extends GroupDataWriter implements DataWriter {
+    public StructDataWriter(StructObjectInspector inspector, GroupType groupType) {
+      super(inspector, groupType);
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.startGroup();
+      super.write(value);
+      recordConsumer.endGroup();
+    }
+  }
+
+  private class ListDataWriter implements DataWriter {
+    private ListObjectInspector inspector;
+    private String elementName;
+    private DataWriter elementWriter;
+    private String repeatedGroupName;
 
-    recordConsumer.startGroup();
-    recordConsumer.startField(repeatedType.getName(), 0);
+    public ListDataWriter(ListObjectInspector inspector, GroupType groupType) {
+      this.inspector = inspector;
+
+      // Get the internal array structure
+      GroupType repeatedType = groupType.getType(0).asGroupType();
+      this.repeatedGroupName = repeatedType.getName();
+
+      Type elementType = repeatedType.getType(0);
+      this.elementName = elementType.getName();
+
+      ObjectInspector elementInspector = this.inspector.getListElementObjectInspector();
+      this.elementWriter = createWriter(elementInspector, elementType);
+    }
 
-    Map<?, ?> mapValues = inspector.getMap(value);
+    @Override
+    public void write(Object value) {
+      recordConsumer.startGroup();
+      recordConsumer.startField(repeatedGroupName, 0);
+
+      int listLength = inspector.getListLength(value);
+      for (int i = 0; i < listLength; i++) {
+        Object element = inspector.getListElement(value, i);
+        recordConsumer.startGroup();
+        if (element != null) {
+          recordConsumer.startField(elementName, 0);
+          elementWriter.write(element);
+          recordConsumer.endField(elementName, 0);
+        }
+        recordConsumer.endGroup();
+      }
 
-    Type keyType = repeatedType.getType(0);
-    String keyName = keyType.getName();
-    ObjectInspector keyInspector = inspector.getMapKeyObjectInspector();
+      recordConsumer.endField(repeatedGroupName, 0);
+      recordConsumer.endGroup();
+    }
+  }
 
-    Type valuetype = repeatedType.getType(1);
-    String valueName = valuetype.getName();
-    ObjectInspector valueInspector = inspector.getMapValueObjectInspector();
+  private class MapDataWriter implements DataWriter {
+    private MapObjectInspector inspector;
+    private String repeatedGroupName;
+    private String keyName, valueName;
+    private DataWriter keyWriter, valueWriter;
+
+    public MapDataWriter(MapObjectInspector inspector, GroupType groupType) {
+      this.inspector = inspector;
+
+      // Get the internal map structure (MAP_KEY_VALUE)
+      GroupType repeatedType = groupType.getType(0).asGroupType();
+      this.repeatedGroupName = repeatedType.getName();
+
+      // Get key element information
+      Type keyType = repeatedType.getType(0);
+      ObjectInspector keyInspector = this.inspector.getMapKeyObjectInspector();
+      this.keyName = keyType.getName();
+      this.keyWriter = createWriter(keyInspector, keyType);
+
+      // Get value element information
+      Type valuetype = repeatedType.getType(1);
+      ObjectInspector valueInspector = this.inspector.getMapValueObjectInspector();
+      this.valueName = valuetype.getName();
+      this.valueWriter = createWriter(valueInspector, valuetype);
+    }
 
-    for (Map.Entry<?, ?> keyValue : mapValues.entrySet()) {
+    @Override
+    public void write(Object value) {
       recordConsumer.startGroup();
-      if (keyValue != null) {
-        // write key element
-        Object keyElement = keyValue.getKey();
-        recordConsumer.startField(keyName, 0);
-        writeValue(keyElement, keyInspector, keyType);
-        recordConsumer.endField(keyName, 0);
-
-        // write value element
-        Object valueElement = keyValue.getValue();
-        if (valueElement != null) {
-          recordConsumer.startField(valueName, 1);
-          writeValue(valueElement, valueInspector, valuetype);
-          recordConsumer.endField(valueName, 1);
+      recordConsumer.startField(repeatedGroupName, 0);
+
+      Map<?, ?> mapValues = inspector.getMap(value);
+      for (Map.Entry<?, ?> keyValue : mapValues.entrySet()) {
+        recordConsumer.startGroup();
+        if (keyValue != null) {
+          // write key element
+          Object keyElement = keyValue.getKey();
+          recordConsumer.startField(keyName, 0);
+          keyWriter.write(keyElement);
+          recordConsumer.endField(keyName, 0);
+
+          // write value element
+          Object valueElement = keyValue.getValue();
+          if (valueElement != null) {
+            recordConsumer.startField(valueName, 1);
+            valueWriter.write(valueElement);
+            recordConsumer.endField(valueName, 1);
+          }
         }
+        recordConsumer.endGroup();
       }
+
+      recordConsumer.endField(repeatedGroupName, 0);
       recordConsumer.endGroup();
     }
+  }
+
+  private class BooleanDataWriter implements DataWriter {
+    private BooleanObjectInspector inspector;
+
+    public BooleanDataWriter(BooleanObjectInspector inspector) {
+      this.inspector = inspector;
+    }
 
-    recordConsumer.endField(repeatedType.getName(), 0);
-    recordConsumer.endGroup();
+    @Override
+    public void write(Object value) {
+      recordConsumer.addBoolean(inspector.get(value));
+    }
   }
 
-  /**
-   * It writes the primitive value to the Parquet RecordConsumer.
-   * @param value The object that contains the primitive value.
-   * @param inspector The object inspector used to get the correct value type.
-   */
-  private void writePrimitive(final Object value, final PrimitiveObjectInspector inspector) {
-    if (value == null) {
-      return;
-    }
-
-    switch (inspector.getPrimitiveCategory()) {
-      case VOID:
-        return;
-      case DOUBLE:
-        recordConsumer.addDouble(((DoubleObjectInspector) inspector).get(value));
-        break;
-      case BOOLEAN:
-        recordConsumer.addBoolean(((BooleanObjectInspector) inspector).get(value));
-        break;
-      case FLOAT:
-        recordConsumer.addFloat(((FloatObjectInspector) inspector).get(value));
-        break;
-      case BYTE:
-        recordConsumer.addInteger(((ByteObjectInspector) inspector).get(value));
-        break;
-      case INT:
-        recordConsumer.addInteger(((IntObjectInspector) inspector).get(value));
-        break;
-      case LONG:
-        recordConsumer.addLong(((LongObjectInspector) inspector).get(value));
-        break;
-      case SHORT:
-        recordConsumer.addInteger(((ShortObjectInspector) inspector).get(value));
-        break;
-      case STRING:
-        String v = ((StringObjectInspector) inspector).getPrimitiveJavaObject(value);
-        recordConsumer.addBinary(Binary.fromString(v));
-        break;
-      case CHAR:
-        String vChar = ((HiveCharObjectInspector) inspector).getPrimitiveJavaObject(value).getStrippedValue();
-        recordConsumer.addBinary(Binary.fromString(vChar));
-        break;
-      case VARCHAR:
-        String vVarchar = ((HiveVarcharObjectInspector) inspector).getPrimitiveJavaObject(value).getValue();
-        recordConsumer.addBinary(Binary.fromString(vVarchar));
-        break;
-      case BINARY:
-        byte[] vBinary = ((BinaryObjectInspector) inspector).getPrimitiveJavaObject(value);
-        recordConsumer.addBinary(Binary.fromByteArray(vBinary));
-        break;
-      case TIMESTAMP:
-        Timestamp ts = ((TimestampObjectInspector) inspector).getPrimitiveJavaObject(value);
-        recordConsumer.addBinary(NanoTimeUtils.getNanoTime(ts, false).toBinary());
-        break;
-      case DECIMAL:
-        HiveDecimal vDecimal = ((HiveDecimal)inspector.getPrimitiveJavaObject(value));
-        DecimalTypeInfo decTypeInfo = (DecimalTypeInfo)inspector.getTypeInfo();
-        recordConsumer.addBinary(decimalToBinary(vDecimal, decTypeInfo));
-        break;
-      case DATE:
-        Date vDate = ((DateObjectInspector) inspector).getPrimitiveJavaObject(value);
-        recordConsumer.addInteger(DateWritable.dateToDays(vDate));
-        break;
-      default:
-        throw new IllegalArgumentException("Unsupported primitive data type: " + inspector.getPrimitiveCategory());
+  private class ByteDataWriter implements DataWriter {
+    private ByteObjectInspector inspector;
+
+    public ByteDataWriter(ByteObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.addInteger(inspector.get(value));
+    }
+  }
+
+  private class ShortDataWriter implements DataWriter {
+    private ShortObjectInspector inspector;
+    public ShortDataWriter(ShortObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.addInteger(inspector.get(value));
+    }
+  }
+
+  private class IntDataWriter implements DataWriter {
+    private IntObjectInspector inspector;
+
+    public IntDataWriter(IntObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.addInteger(inspector.get(value));
+    }
+  }
+
+  private class LongDataWriter implements DataWriter {
+    private LongObjectInspector inspector;
+
+    public LongDataWriter(LongObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.addLong(inspector.get(value));
+    }
+  }
+
+  private class FloatDataWriter implements DataWriter {
+    private FloatObjectInspector inspector;
+
+    public FloatDataWriter(FloatObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.addFloat(inspector.get(value));
+    }
+  }
+
+  private class DoubleDataWriter implements DataWriter {
+    private DoubleObjectInspector inspector;
+
+    public DoubleDataWriter(DoubleObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      recordConsumer.addDouble(inspector.get(value));
+    }
+  }
+
+  private class StringDataWriter implements DataWriter {
+    private StringObjectInspector inspector;
+
+    public StringDataWriter(StringObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      String v = inspector.getPrimitiveJavaObject(value);
+      recordConsumer.addBinary(Binary.fromString(v));
     }
   }
 
-  private Binary decimalToBinary(final HiveDecimal hiveDecimal, final DecimalTypeInfo decimalTypeInfo) {
-    int prec = decimalTypeInfo.precision();
-    int scale = decimalTypeInfo.scale();
-    byte[] decimalBytes = hiveDecimal.setScale(scale).unscaledValue().toByteArray();
+  private class CharDataWriter implements DataWriter {
+    private HiveCharObjectInspector inspector;
 
-    // Estimated number of bytes needed.
-    int precToBytes = ParquetHiveSerDe.PRECISION_TO_BYTE_COUNT[prec - 1];
-    if (precToBytes == decimalBytes.length) {
-      // No padding needed.
-      return Binary.fromByteArray(decimalBytes);
+    public CharDataWriter(HiveCharObjectInspector inspector) {
+      this.inspector = inspector;
     }
 
-    byte[] tgt = new byte[precToBytes];
+    @Override
+    public void write(Object value) {
+      String v = inspector.getPrimitiveJavaObject(value).getStrippedValue();
+      recordConsumer.addBinary(Binary.fromString(v));
+    }
+  }
+
+  private class VarcharDataWriter implements DataWriter {
+    private HiveVarcharObjectInspector inspector;
+
+    public VarcharDataWriter(HiveVarcharObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      String v = inspector.getPrimitiveJavaObject(value).getValue();
+      recordConsumer.addBinary(Binary.fromString(v));
+    }
+  }
+
+  private class BinaryDataWriter implements DataWriter {
+    private BinaryObjectInspector inspector;
+
+    public BinaryDataWriter(BinaryObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      byte[] vBinary = inspector.getPrimitiveJavaObject(value);
+      recordConsumer.addBinary(Binary.fromByteArray(vBinary));
+    }
+  }
+
+  private class TimestampDataWriter implements DataWriter {
+    private TimestampObjectInspector inspector;
+
+    public TimestampDataWriter(TimestampObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      Timestamp ts = inspector.getPrimitiveJavaObject(value);
+      recordConsumer.addBinary(NanoTimeUtils.getNanoTime(ts, false).toBinary());
+    }
+  }
+
+  private class DecimalDataWriter implements DataWriter {
+    private HiveDecimalObjectInspector inspector;
+
+    public DecimalDataWriter(HiveDecimalObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      HiveDecimal vDecimal = inspector.getPrimitiveJavaObject(value);
+      DecimalTypeInfo decTypeInfo = (DecimalTypeInfo)inspector.getTypeInfo();
+      recordConsumer.addBinary(decimalToBinary(vDecimal, decTypeInfo));
+    }
+
+    private Binary decimalToBinary(final HiveDecimal hiveDecimal, final DecimalTypeInfo decimalTypeInfo) {
+      int prec = decimalTypeInfo.precision();
+      int scale = decimalTypeInfo.scale();
+      byte[] decimalBytes = hiveDecimal.setScale(scale).unscaledValue().toByteArray();
+
+      // Estimated number of bytes needed.
+      int precToBytes = ParquetHiveSerDe.PRECISION_TO_BYTE_COUNT[prec - 1];
+      if (precToBytes == decimalBytes.length) {
+        // No padding needed.
+        return Binary.fromByteArray(decimalBytes);
+      }
+
+      byte[] tgt = new byte[precToBytes];
       if (hiveDecimal.signum() == -1) {
-      // For negative number, initializing bits to 1
-      for (int i = 0; i < precToBytes; i++) {
-        tgt[i] |= 0xFF;
+        // For negative number, initializing bits to 1
+        for (int i = 0; i < precToBytes; i++) {
+          tgt[i] |= 0xFF;
+        }
       }
+
+      System.arraycopy(decimalBytes, 0, tgt, precToBytes - decimalBytes.length, decimalBytes.length); // Padding leading zeroes/ones.
+      return Binary.fromByteArray(tgt);
     }
+  }
 
-    System.arraycopy(decimalBytes, 0, tgt, precToBytes - decimalBytes.length, decimalBytes.length); // Padding leading zeroes/ones.
-    return Binary.fromByteArray(tgt);
+  private class DateDataWriter implements DataWriter {
+    private DateObjectInspector inspector;
+
+    public DateDataWriter(DateObjectInspector inspector) {
+      this.inspector = inspector;
+    }
+
+    @Override
+    public void write(Object value) {
+      Date vDate = inspector.getPrimitiveJavaObject(value);
+      recordConsumer.addInteger(DateWritable.dateToDays(vDate));
+    }
   }
-}
+}
\ No newline at end of file
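
One detail in the DecimalDataWriter above deserves a note: decimalToBinary() encodes a decimal's unscaled value as fixed-length big-endian two's-complement bytes, filling the leading pad bytes with 0xFF for negative numbers so the sign extends correctly. A standalone sketch of just that padding step follows; pad() is a hypothetical helper written for illustration, not Hive code.

    import java.math.BigInteger;
    import java.util.Arrays;

    public class FixedLengthDecimalSketch {
      // Pads a minimal two's-complement big-endian encoding out to a fixed
      // width: 0xFF fill for negative values, 0x00 fill otherwise. This is
      // the same move decimalToBinary() makes when precToBytes exceeds the
      // length of the decimal's unscaled bytes.
      static byte[] pad(BigInteger unscaled, int width) {
        byte[] src = unscaled.toByteArray();  // minimal two's-complement form
        if (src.length == width) {
          return src;                         // no padding needed
        }
        byte[] tgt = new byte[width];
        if (unscaled.signum() == -1) {
          Arrays.fill(tgt, (byte) 0xFF);      // sign-extend negative numbers
        }
        System.arraycopy(src, 0, tgt, width - src.length, src.length);
        return tgt;
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(pad(BigInteger.valueOf(300), 4)));
        // [0, 0, 1, 44]
        System.out.println(Arrays.toString(pad(BigInteger.valueOf(-300), 4)));
        // [-1, -1, -2, -44]
      }
    }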


[31/50] [abbrv] hive git commit: HIVE-11239: Test encryption_insert_partition_static.q fails with different output results on other environments(Sergio Pena, reviewed by Ferdinand Xu)

Posted by xu...@apache.org.
HIVE-11239: Test encryption_insert_partition_static.q fails with different output results on other environments(Sergio Pena, reviewed by Ferdinand Xu)
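
The fix below removes the 'explain extended' statements from the test: their plans embed environment-specific staging paths that the harness can only partially mask (the "#### A PARTIAL masked pattern was here ####" lines), so the golden output differed between machines. As a rough illustration of what masking does -- the regex and names here are invented for this sketch, not the q-test harness's actual code -- nondeterministic paths are normalized to a stable token before the golden-file comparison:

    import java.util.regex.Pattern;

    public class GoldenFileMaskSketch {
      // Hypothetical stand-in for the harness's masking step: replace
      // environment-specific staging paths with the stable token that
      // appears throughout the .q.out files in this thread.
      private static final Pattern STAGING_PATH =
          Pattern.compile("\\S*/\\.hive-staging\\S*");

      static String mask(String line) {
        return STAGING_PATH.matcher(line)
            .replaceAll("#### A masked pattern was here ####");
      }

      public static void main(String[] args) {
        System.out.println(mask("location /data/warehouse/encryptedTable/.hive-staging_hive_2015"));
        // location #### A masked pattern was here ####
      }
    }

Paths that include per-run identifiers can only be masked up to the stable prefix, which is why the partially masked lines still leak environment differences and the explain output had to go.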


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a65bcbdf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a65bcbdf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a65bcbdf

Branch: refs/heads/beeline-cli
Commit: a65bcbdf463903a5a9650693d597a4b711abea2f
Parents: 21aecbc
Author: Ferdinand Xu <ch...@intel.com>
Authored: Tue Jul 14 04:50:15 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Tue Jul 14 04:50:15 2015 -0400

----------------------------------------------------------------------
 .../encryption_insert_partition_static.q        |  17 -
 .../encryption_insert_partition_static.q.out    | 739 +------------------
 2 files changed, 11 insertions(+), 745 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a65bcbdf/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
index c5769a6..69687df 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
@@ -18,11 +18,6 @@ create table unencryptedTable(key string,
     value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 -- insert encrypted table from values
-explain extended insert into table encryptedTable partition
-    (ds='today') values
-    ('501', 'val_501'),
-    ('502', 'val_502');
-
 insert into table encryptedTable partition
     (ds='today') values
     ('501', 'val_501'),
@@ -31,27 +26,15 @@ insert into table encryptedTable partition
 select * from encryptedTable order by key;
 
 -- insert encrypted table from unencrypted source
-explain extended
-insert into table encryptedTable partition (ds='yesterday')
-select * from src where key in ('238', '86');
-
 insert into table encryptedTable partition (ds='yesterday')
 select * from src where key in ('238', '86');
 
 select * from encryptedTable order by key;
 
 -- insert unencrypted table from encrypted source
-explain extended
 insert into table unencryptedTable partition (ds='today')
 select key, value from encryptedTable where ds='today';
 
-insert into table unencryptedTable partition (ds='today')
-select key, value from encryptedTable where ds='today';
-
-explain extended
-insert into table unencryptedTable partition (ds='yesterday')
-select key, value from encryptedTable where ds='yesterday';
-
 insert into table unencryptedTable partition (ds='yesterday')
 select key, value from encryptedTable where ds='yesterday';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a65bcbdf/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
index b10610c..c2f0ddc 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
@@ -39,187 +39,23 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@unencryptedTable
 PREHOOK: query: -- insert encrypted table from values
-explain extended insert into table encryptedTable partition
+insert into table encryptedTable partition
     (ds='today') values
     ('501', 'val_501'),
     ('502', 'val_502')
 PREHOOK: type: QUERY
-POSTHOOK: query: -- insert encrypted table from values
-explain extended insert into table encryptedTable partition
-    (ds='today') values
-    ('501', 'val_501'),
-    ('502', 'val_502')
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      null
-         null
-            Values__Tmp__Table__1
-   TOK_INSERT
-      TOK_INSERT_INTO
-         TOK_TAB
-            TOK_TABNAME
-               encryptedTable
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  'today'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: values__tmp__table__1
-            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: tmp_values_col1 (type: string), tmp_values_col2 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                sort order: 
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                value expressions: _col0 (type: string), _col1 (type: string)
-                auto parallelism: false
-      Path -> Alias:
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
-      Path -> Partition:
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
-          Partition
-            base file name: Values__Tmp__Table__1
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns tmp_values_col1,tmp_values_col2
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
-              name default.values__tmp__table__1
-              serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns tmp_values_col1,tmp_values_col2
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
-                name default.values__tmp__table__1
-                serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.values__tmp__table__1
-            name: default.values__tmp__table__1
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 1
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
-            NumFilesPerFileSink: 1
-            Static Partition Specification: ds=today/
-            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
-            table:
-                input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                properties:
-                  bucket_count 2
-                  bucket_field_name key
-                  columns key,value
-                  columns.comments 
-                  columns.types string:string
-#### A masked pattern was here ####
-                  name default.encryptedtable
-                  partition_columns ds
-                  partition_columns.types string
-                  serialization.ddl struct encryptedtable { string key, string value}
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                  transactional true
-#### A masked pattern was here ####
-                serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                name: default.encryptedtable
-            TotalFiles: 1
-            GatherStats: true
-            MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds today
-          replace: false
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
-          table:
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              properties:
-                bucket_count 2
-                bucket_field_name key
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.encryptedtable
-                partition_columns ds
-                partition_columns.types string
-                serialization.ddl struct encryptedtable { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                transactional true
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.encryptedtable
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
-
-PREHOOK: query: insert into table encryptedTable partition
-    (ds='today') values
-    ('501', 'val_501'),
-    ('502', 'val_502')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@encryptedtable@ds=today
-POSTHOOK: query: insert into table encryptedTable partition
+POSTHOOK: query: -- insert encrypted table from values
+insert into table encryptedTable partition
     (ds='today') values
     ('501', 'val_501'),
     ('502', 'val_502')
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@encryptedtable@ds=today
-POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).key SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).key SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
 PREHOOK: query: select * from encryptedTable order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@encryptedtable
@@ -233,196 +69,13 @@ POSTHOOK: Input: default@encryptedtable@ds=today
 501	val_501	today
 502	val_502	today
 PREHOOK: query: -- insert encrypted table from unencrypted source
-explain extended
 insert into table encryptedTable partition (ds='yesterday')
 select * from src where key in ('238', '86')
 PREHOOK: type: QUERY
-POSTHOOK: query: -- insert encrypted table from unencrypted source
-explain extended
-insert into table encryptedTable partition (ds='yesterday')
-select * from src where key in ('238', '86')
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            src
-   TOK_INSERT
-      TOK_INSERT_INTO
-         TOK_TAB
-            TOK_TABNAME
-               encryptedTable
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  'yesterday'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         TOK_FUNCTION
-            in
-            TOK_TABLE_OR_COL
-               key
-            '238'
-            '86'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (key) IN ('238', '86') (type: boolean)
-              Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col0 (type: string), _col1 (type: string)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 0
-              rawDataSize 0
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 0
-                rawDataSize 0
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 1
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
-            NumFilesPerFileSink: 1
-            Static Partition Specification: ds=yesterday/
-            Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
-            table:
-                input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                properties:
-                  bucket_count 2
-                  bucket_field_name key
-                  columns key,value
-                  columns.comments 
-                  columns.types string:string
-#### A masked pattern was here ####
-                  name default.encryptedtable
-                  partition_columns ds
-                  partition_columns.types string
-                  serialization.ddl struct encryptedtable { string key, string value}
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                  transactional true
-#### A masked pattern was here ####
-                serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                name: default.encryptedtable
-            TotalFiles: 1
-            GatherStats: true
-            MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds yesterday
-          replace: false
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
-          table:
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              properties:
-                bucket_count 2
-                bucket_field_name key
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.encryptedtable
-                partition_columns ds
-                partition_columns.types string
-                serialization.ddl struct encryptedtable { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                transactional true
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.encryptedtable
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
-
-PREHOOK: query: insert into table encryptedTable partition (ds='yesterday')
-select * from src where key in ('238', '86')
-PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@encryptedtable@ds=yesterday
-POSTHOOK: query: insert into table encryptedTable partition (ds='yesterday')
+POSTHOOK: query: -- insert encrypted table from unencrypted source
+insert into table encryptedTable partition (ds='yesterday')
 select * from src where key in ('238', '86')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
@@ -447,199 +100,14 @@ POSTHOOK: Input: default@encryptedtable@ds=yesterday
 502	val_502	today
 86	val_86	yesterday
 PREHOOK: query: -- insert unencrypted table from encrypted source
-explain extended
 insert into table unencryptedTable partition (ds='today')
 select key, value from encryptedTable where ds='today'
 PREHOOK: type: QUERY
-POSTHOOK: query: -- insert unencrypted table from encrypted source
-explain extended
-insert into table unencryptedTable partition (ds='today')
-select key, value from encryptedTable where ds='today'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            encryptedTable
-   TOK_INSERT
-      TOK_INSERT_INTO
-         TOK_TAB
-            TOK_TABNAME
-               unencryptedTable
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  'today'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            'today'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: encryptedtable
-            Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                sort order: 
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                value expressions: _col0 (type: string), _col1 (type: string)
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: ds=today
-            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-            partition values:
-              ds today
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count 2
-              bucket_field_name key
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.encryptedtable
-              numFiles 2
-              numRows 0
-              partition_columns ds
-              partition_columns.types string
-              rawDataSize 0
-              serialization.ddl struct encryptedtable { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 1351
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-          
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              properties:
-                bucket_count 2
-                bucket_field_name key
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.encryptedtable
-                partition_columns ds
-                partition_columns.types string
-                serialization.ddl struct encryptedtable { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                transactional true
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.encryptedtable
-            name: default.encryptedtable
-      Truncated Path -> Alias:
-        /encryptedTable/ds=today [encryptedtable]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 1
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
-            NumFilesPerFileSink: 1
-            Static Partition Specification: ds=today/
-            Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
-            table:
-                input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                properties:
-                  bucket_count 2
-                  bucket_field_name key
-                  columns key,value
-                  columns.comments 
-                  columns.types string:string
-#### A masked pattern was here ####
-                  name default.unencryptedtable
-                  partition_columns ds
-                  partition_columns.types string
-                  serialization.ddl struct unencryptedtable { string key, string value}
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                  transactional true
-#### A masked pattern was here ####
-                serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                name: default.unencryptedtable
-            TotalFiles: 1
-            GatherStats: true
-            MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds today
-          replace: false
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
-          table:
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              properties:
-                bucket_count 2
-                bucket_field_name key
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.unencryptedtable
-                partition_columns ds
-                partition_columns.types string
-                serialization.ddl struct unencryptedtable { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                transactional true
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.unencryptedtable
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
-
-PREHOOK: query: insert into table unencryptedTable partition (ds='today')
-select key, value from encryptedTable where ds='today'
-PREHOOK: type: QUERY
 PREHOOK: Input: default@encryptedtable
 PREHOOK: Input: default@encryptedtable@ds=today
 PREHOOK: Output: default@unencryptedtable@ds=today
-POSTHOOK: query: insert into table unencryptedTable partition (ds='today')
+POSTHOOK: query: -- insert unencrypted table from encrypted source
+insert into table unencryptedTable partition (ds='today')
 select key, value from encryptedTable where ds='today'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@encryptedtable
@@ -647,191 +115,6 @@ POSTHOOK: Input: default@encryptedtable@ds=today
 POSTHOOK: Output: default@unencryptedtable@ds=today
 POSTHOOK: Lineage: unencryptedtable PARTITION(ds=today).key SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: unencryptedtable PARTITION(ds=today).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: explain extended
-insert into table unencryptedTable partition (ds='yesterday')
-select key, value from encryptedTable where ds='yesterday'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-insert into table unencryptedTable partition (ds='yesterday')
-select key, value from encryptedTable where ds='yesterday'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            encryptedTable
-   TOK_INSERT
-      TOK_INSERT_INTO
-         TOK_TAB
-            TOK_TABNAME
-               unencryptedTable
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  'yesterday'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            'yesterday'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: encryptedtable
-            Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                sort order: 
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                value expressions: _col0 (type: string), _col1 (type: string)
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: ds=yesterday
-            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-            partition values:
-              ds yesterday
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count 2
-              bucket_field_name key
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.encryptedtable
-              numFiles 2
-              numRows 0
-              partition_columns ds
-              partition_columns.types string
-              rawDataSize 0
-              serialization.ddl struct encryptedtable { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 1372
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-          
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              properties:
-                bucket_count 2
-                bucket_field_name key
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.encryptedtable
-                partition_columns ds
-                partition_columns.types string
-                serialization.ddl struct encryptedtable { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                transactional true
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.encryptedtable
-            name: default.encryptedtable
-      Truncated Path -> Alias:
-        /encryptedTable/ds=yesterday [encryptedtable]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 1
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
-            NumFilesPerFileSink: 1
-            Static Partition Specification: ds=yesterday/
-            Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
-            table:
-                input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                properties:
-                  bucket_count 2
-                  bucket_field_name key
-                  columns key,value
-                  columns.comments 
-                  columns.types string:string
-#### A masked pattern was here ####
-                  name default.unencryptedtable
-                  partition_columns ds
-                  partition_columns.types string
-                  serialization.ddl struct unencryptedtable { string key, string value}
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                  transactional true
-#### A masked pattern was here ####
-                serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                name: default.unencryptedtable
-            TotalFiles: 1
-            GatherStats: true
-            MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds yesterday
-          replace: false
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
-          table:
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              properties:
-                bucket_count 2
-                bucket_field_name key
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.unencryptedtable
-                partition_columns ds
-                partition_columns.types string
-                serialization.ddl struct unencryptedtable { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                transactional true
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.unencryptedtable
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
-
 PREHOOK: query: insert into table unencryptedTable partition (ds='yesterday')
 select key, value from encryptedTable where ds='yesterday'
 PREHOOK: type: QUERY

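Throughout these .q.out golden files, nondeterministic output such as warehouse paths is replaced before comparison, which is what produces the "#### A masked pattern was here ####" lines above. A minimal sketch of that kind of masking pass, assuming a simple regex over each output line (an illustration only, not Hive's actual QTestUtil code):

import java.util.regex.Pattern;

/** Illustration only: masks path-like tokens the way a qtest harness might. */
public class MaskSketch {

  // Assumption: any file/hdfs/pfile URI is nondeterministic between test runs.
  private static final Pattern PATH = Pattern.compile("(pfile|file|hdfs):/\\S+");

  static String mask(String line) {
    return PATH.matcher(line).find() ? "#### A masked pattern was here ####" : line;
  }

  public static void main(String[] args) {
    System.out.println(mask("location pfile:/data/warehouse/encryptedTable"));
    // prints: #### A masked pattern was here ####
  }
}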

[21/50] [abbrv] hive git commit: HIVE-11030 - Enhance storage layer to create one delta file per write (Eugene Koifman, reviewed by Alan Gates)

Posted by xu...@apache.org.
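The test changes below replace hardcoded delta directory names such as delta_21_22 and delta_0000021_0000024 with the helpers makeDeltaDirName and makeDeltaDirNameCompacted, so one set of assertions can cover both the pre-1.3.0 layout and the new one-delta-per-write layout. A minimal sketch of what such helpers could look like, inferring the 7-digit transaction-id padding and the 4-digit statement-id suffix from the directory names visible in this diff (both widths are assumptions, not the actual CompactorTest code):

/** Illustration only: formats ACID delta directory names as they appear in this diff. */
public class DeltaNameSketch {

  // Compacted deltas carry no statement id, e.g. delta_0000021_0000024.
  static String makeDeltaDirNameCompacted(long minTxn, long maxTxn) {
    return String.format("delta_%07d_%07d", minTxn, maxTxn);
  }

  // Assumption: with one delta per write (HIVE-11030), each write is
  // disambiguated by a statement-id suffix, e.g. delta_0000021_0000022_0000.
  static String makeDeltaDirName(long minTxn, long maxTxn, int stmtId) {
    return makeDeltaDirNameCompacted(minTxn, maxTxn) + String.format("_%04d", stmtId);
  }

  public static void main(String[] args) {
    System.out.println(makeDeltaDirNameCompacted(21, 24)); // delta_0000021_0000024
    System.out.println(makeDeltaDirName(21, 22, 0));       // delta_0000021_0000022_0000
  }
}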
http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index bebac54..11e5333 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -281,7 +281,7 @@ public class TestWorker extends CompactorTest {
     // Find the new delta file and make sure it has the right contents
     boolean sawNewDelta = false;
     for (int i = 0; i < stat.length; i++) {
-      if (stat[i].getPath().getName().equals("delta_0000021_0000024")) {
+      if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
         FileStatus[] buckets = fs.listStatus(stat[i].getPath());
         Assert.assertEquals(2, buckets.length);
@@ -296,6 +296,10 @@ public class TestWorker extends CompactorTest {
     Assert.assertTrue(sawNewDelta);
   }
 
+  /**
+   * todo: fix https://issues.apache.org/jira/browse/HIVE-9995
+   * @throws Exception
+   */
   @Test
   public void minorWithOpenInMiddle() throws Exception {
     LOG.debug("Starting minorWithOpenInMiddle");
@@ -321,15 +325,18 @@ public class TestWorker extends CompactorTest {
     // There should still now be 5 directories in the location
     FileSystem fs = FileSystem.get(conf);
     FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
-    Assert.assertEquals(5, stat.length);
+    boolean is130 = this instanceof TestWorker2;
+    Assert.assertEquals(is130 ? 5 : 4, stat.length);
 
     // Find the new delta file and make sure it has the right contents
     Arrays.sort(stat);
     Assert.assertEquals("base_20", stat[0].getPath().getName());
-    Assert.assertEquals("delta_0000021_0000022", stat[1].getPath().getName());
-    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
-    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
-    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+    if(is130) {//in1.3.0 orig delta is delta_00021_00022_0000 and compacted one is delta_00021_00022...
+      Assert.assertEquals(makeDeltaDirNameCompacted(21, 22), stat[1].getPath().getName());
+    }
+    Assert.assertEquals(makeDeltaDirName(21, 22), stat[1 + (is130 ? 1 : 0)].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(23, 25), stat[2 + (is130 ? 1 : 0)].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(26, 27), stat[3 + (is130 ? 1 : 0)].getPath().getName());
   }
 
   @Test
@@ -362,10 +369,10 @@ public class TestWorker extends CompactorTest {
     // Find the new delta file and make sure it has the right contents
     Arrays.sort(stat);
     Assert.assertEquals("base_20", stat[0].getPath().getName());
-    Assert.assertEquals("delta_0000021_0000027", stat[1].getPath().getName());
-    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
-    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
-    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(21, 22), stat[1].getPath().getName());
+    Assert.assertEquals(makeDeltaDirNameCompacted(21, 27), stat[2].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
   }
 
   @Test
@@ -398,7 +405,7 @@ public class TestWorker extends CompactorTest {
     // Find the new delta file and make sure it has the right contents
     boolean sawNewDelta = false;
     for (int i = 0; i < stat.length; i++) {
-      if (stat[i].getPath().getName().equals("delta_0000021_0000024")) {
+      if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
         FileStatus[] buckets = fs.listStatus(stat[i].getPath());
         Assert.assertEquals(2, buckets.length);
@@ -441,7 +448,7 @@ public class TestWorker extends CompactorTest {
     // Find the new delta file and make sure it has the right contents
     boolean sawNewDelta = false;
     for (int i = 0; i < stat.length; i++) {
-      if (stat[i].getPath().getName().equals("delta_0000001_0000004")) {
+      if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(1, 4))) {
         sawNewDelta = true;
         FileStatus[] buckets = fs.listStatus(stat[i].getPath());
         Assert.assertEquals(2, buckets.length);
@@ -661,7 +668,7 @@ public class TestWorker extends CompactorTest {
     // Find the new delta file and make sure it has the right contents
     boolean sawNewDelta = false;
     for (int i = 0; i < stat.length; i++) {
-      if (stat[i].getPath().getName().equals("delta_0000021_0000024")) {
+      if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
         sawNewDelta = true;
         FileStatus[] buckets = fs.listStatus(stat[i].getPath());
         Assert.assertEquals(2, buckets.length);
@@ -760,9 +767,9 @@ public class TestWorker extends CompactorTest {
     Arrays.sort(stat);
     Assert.assertEquals("base_0000022", stat[0].getPath().getName());
     Assert.assertEquals("base_20", stat[1].getPath().getName());
-    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
-    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
-    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(21, 22), stat[2].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
   }
 
   @Test
@@ -796,9 +803,13 @@ public class TestWorker extends CompactorTest {
     Arrays.sort(stat);
     Assert.assertEquals("base_0000027", stat[0].getPath().getName());
     Assert.assertEquals("base_20", stat[1].getPath().getName());
-    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
-    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
-    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(21, 22), stat[2].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
+    Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
+  }
+  @Override
+  boolean useHive130DeltaDirName() {
+    return false;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java
new file mode 100644
index 0000000..3b5283a
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java
@@ -0,0 +1,16 @@
+package org.apache.hadoop.hive.ql.txn.compactor;
+
+/**
+ * Same as TestWorker but tests delta file names in Hive 1.3.0 format 
+ */
+public class TestWorker2 extends TestWorker {
+
+  public TestWorker2() throws Exception {
+    super();
+  }
+
+  @Override
+  boolean useHive130DeltaDirName() {
+    return true;
+  }
+}

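Worth noting: TestWorker2 re-runs every test inherited from TestWorker unchanged, flipping only useHive130DeltaDirName(), so the assertions above (now routed through makeDeltaDirName and makeDeltaDirNameCompacted) are exercised against both delta directory layouts without duplicating any test logic.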

[28/50] [abbrv] hive git commit: HIVE-10882 : CBO: Calcite Operator To Hive Operator (Calcite Return Path) empty filtersMap of join operator causes wrong results (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Posted by xu...@apache.org.
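The new fouter_join_ppr.q.out golden file below is the interesting part: in the plan, the ON-clause condition b.ds = '2008-04-08' shows up under the join's "filter mappings" / "filter predicates" for input 1 rather than as a pre-join Filter Operator, because in a FULL OUTER JOIN a row that fails the ON condition must still be emitted null-extended instead of being dropped (losing that filtersMap is exactly what produced wrong results). A minimal sketch of that semantics over plain collections, as an illustration of the behavior rather than Hive's join operator:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiPredicate;

/** Illustration only: in a FULL OUTER JOIN the ON condition gates pairing,
 *  not row survival -- rows that match nothing are emitted null-extended. */
public class FullOuterJoinSketch {

  static List<String[]> fullOuterJoin(List<String[]> left, List<String[]> right,
                                      BiPredicate<String[], String[]> on) {
    List<String[]> out = new ArrayList<>();
    boolean[] rightMatched = new boolean[right.size()];
    for (String[] l : left) {
      boolean matched = false;
      for (int i = 0; i < right.size(); i++) {
        String[] r = right.get(i);
        if (on.test(l, r)) {                     // ON condition only controls pairing
          out.add(new String[]{l[0], l[1], r[0], r[1]});
          matched = true;
          rightMatched[i] = true;
        }
      }
      if (!matched) {
        out.add(new String[]{l[0], l[1], null, null}); // null-extend unmatched left row
      }
    }
    for (int i = 0; i < right.size(); i++) {           // null-extend unmatched right rows
      if (!rightMatched[i]) {
        out.add(new String[]{null, null, right.get(i)[0], right.get(i)[1]});
      }
    }
    return out;
  }

  public static void main(String[] args) {
    List<String[]> src = new ArrayList<>();
    src.add(new String[]{"17", "val_17"});
    List<String[]> srcpart = new ArrayList<>();
    srcpart.add(new String[]{"17", "2008-04-08"});
    srcpart.add(new String[]{"17", "2008-04-09"});
    // ON (a.key = b.key AND b.ds = '2008-04-08'): the ds predicate belongs to the
    // join itself, so the 2008-04-09 row survives as a null-extended output row.
    for (String[] row : fullOuterJoin(src, srcpart,
        (a, b) -> a[0].equals(b[0]) && "2008-04-08".equals(b[1]))) {
      System.out.println(Arrays.toString(row));
    }
  }
}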
http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
new file mode 100644
index 0000000..087edf2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
@@ -0,0 +1,1694 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_FULLOUTERJOIN
+         TOK_TABREF
+            TOK_TABNAME
+               src
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               srcpart
+            b
+         AND
+            =
+               .
+                  TOK_TABLE_OR_COL
+                     a
+                  key
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  key
+            =
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  ds
+               '2008-04-08'
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         AND
+            AND
+               AND
+                  >
+                     .
+                        TOK_TABLE_OR_COL
+                           a
+                        key
+                     10
+                  <
+                     .
+                        TOK_TABLE_OR_COL
+                           a
+                        key
+                     20
+               >
+                  .
+                     TOK_TABLE_OR_COL
+                        b
+                     key
+                  15
+            <
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  key
+               25
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: 0
+                value expressions: _col1 (type: string)
+                auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), ds (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                tag: 1
+                value expressions: _col1 (type: string), _col2 (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
+        /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+          filter mappings:
+            1 [0, 1]
+          filter predicates:
+            0 
+            1 {(VALUE._col1 = '2008-04-08')}
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) and (UDFToDouble(_col2) > 15.0)) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
+            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types string:string:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key AND a.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key AND a.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_FULLOUTERJOIN
+         TOK_TABREF
+            TOK_TABNAME
+               srcpart
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               src
+            b
+         AND
+            =
+               .
+                  TOK_TABLE_OR_COL
+                     a
+                  key
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  key
+            =
+               .
+                  TOK_TABLE_OR_COL
+                     a
+                  ds
+               '2008-04-08'
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         AND
+            AND
+               AND
+                  >
+                     .
+                        TOK_TABLE_OR_COL
+                           a
+                        key
+                     10
+                  <
+                     .
+                        TOK_TABLE_OR_COL
+                           a
+                        key
+                     20
+               >
+                  .
+                     TOK_TABLE_OR_COL
+                        b
+                     key
+                  15
+            <
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  key
+               25
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), ds (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                tag: 0
+                value expressions: _col1 (type: string), _col2 (type: string)
+                auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: 1
+                value expressions: _col1 (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:a]
+        /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:a]
+        /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:a]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+          filter mappings:
+            0 [1, 1]
+          filter predicates:
+            0 {(VALUE._col1 = '2008-04-08')}
+            1 
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col3, _col4
+          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) and (UDFToDouble(_col3) > 15.0)) and (UDFToDouble(_col3) < 25.0)) (type: boolean)
+            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      columns _col0,_col1,_col2,_col3
+                      columns.types string:string:string:string
+                      escape.delim \
+                      hive.serialization.extend.additional.nesting.levels true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key AND a.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key AND a.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
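
The mirrored case above behaves the same way: the scan of srcpart (alias a) covers all
2000 rows across the four partitions, and the ds condition surfaces in the Join Operator
as "filter predicates: 0 {(VALUE._col1 = '2008-04-08')}" -- a residual predicate
evaluated on the input-0 rows during the join, which can fail a match but never skip a
partition. The symmetric pre-filter rewrite, with the same caveats as the sketch above:

    -- Hypothetical rewrite: push ds into a subquery on the partitioned (a) side.
    FROM
      (SELECT key, value FROM srcpart WHERE ds = '2008-04-08') a
     FULL OUTER JOIN
      src b
     ON (a.key = b.key)
    SELECT a.key, a.value, b.key, b.value
    WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25;
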
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_FULLOUTERJOIN
+         TOK_TABREF
+            TOK_TABNAME
+               src
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               srcpart
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         AND
+            AND
+               AND
+                  AND
+                     >
+                        .
+                           TOK_TABLE_OR_COL
+                              a
+                           key
+                        10
+                     <
+                        .
+                           TOK_TABLE_OR_COL
+                              a
+                           key
+                        20
+                  >
+                     .
+                        TOK_TABLE_OR_COL
+                           b
+                        key
+                     15
+               <
+                  .
+                     TOK_TABLE_OR_COL
+                        b
+                     key
+                  25
+            =
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  ds
+               '2008-04-08'
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  tag: 0
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                  tag: 1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
+            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types string:string:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
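
Moving the same predicates into the WHERE clause changes the picture entirely, as the
plan above shows: the null-rejecting conditions on b let the optimizer downgrade the
FULL OUTER JOIN to a right outer join ("Right Outer Join0 to 1"), b.ds = '2008-04-08'
then prunes the inputs to the two ds=2008-04-08 partitions, and the key bounds are even
propagated transitively across the equi-join onto the src scan. A conceptual sketch of
the downgrade (an equivalent hand-written query, not the optimizer's actual output):

    -- Any row with a NULL b side fails b.key > 15, so under this WHERE clause
    -- the FULL OUTER JOIN can be planned as a RIGHT OUTER JOIN, which in turn
    -- makes b.ds = '2008-04-08' an ordinary pushable (and prunable) filter.
    SELECT a.key, a.value, b.key, b.value
    FROM src a RIGHT OUTER JOIN srcpart b ON (a.key = b.key)
    WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
      AND b.ds = '2008-04-08';
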
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_FULLOUTERJOIN
+         TOK_TABREF
+            TOK_TABNAME
+               srcpart
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               src
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         AND
+            AND
+               AND
+                  AND
+                     >
+                        .
+                           TOK_TABLE_OR_COL
+                              a
+                           key
+                        10
+                     <
+                        .
+                           TOK_TABLE_OR_COL
+                              a
+                           key
+                        20
+                  >
+                     .
+                        TOK_TABLE_OR_COL
+                           b
+                        key
+                     15
+               <
+                  .
+                     TOK_TABLE_OR_COL
+                        b
+                     key
+                  25
+            =
+               .
+                  TOK_TABLE_OR_COL
+                     a
+                  ds
+               '2008-04-08'
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                  tag: 0
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  tag: 1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:a]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col3, _col4
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((UDFToDouble(_col3) > 15.0) and (UDFToDouble(_col3) < 25.0)) (type: boolean)
+            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      columns _col0,_col1,_col2,_col3
+                      columns.types string:string:string:string
+                      escape.delim \
+                      hive.serialization.extend.additional.nesting.levels true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  srcpart a
+ FULL OUTER JOIN 
+  src b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
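
The final pair is the mirror image: with a.ds = '2008-04-08' in the WHERE clause, the
FULL OUTER JOIN is downgraded to a left outer join ("Left Outer Join0 to 1") and the
srcpart scan is pruned to the two ds=2008-04-08 partitions. For spot-checking pruning
like this, EXPLAIN DEPENDENCY is a lighter-weight alternative to EXPLAIN EXTENDED; a
usage sketch against the same test tables (it prints the input tables and partitions
as JSON instead of the full operator tree):

    EXPLAIN DEPENDENCY
     FROM
      srcpart a
     FULL OUTER JOIN
      src b
     ON (a.key = b.key)
     SELECT a.key, a.value, b.key, b.value
     WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08';
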


[43/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
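
Context for the hunks below: HIVE-11145 removes the OFFLINE and NO_DROP protect modes,
so DESCRIBE FORMATTED no longer emits the "Protect Mode: None" row (nor, for
partitions, the masked line that followed it), and each affected golden file loses
exactly those lines. A sketch of the pre-HIVE-11145 DDL that this output used to
reflect (the table name dummy is taken from the first hunk; these statements no longer
parse after the change):

    -- Protect-mode DDL removed by HIVE-11145.
    ALTER TABLE dummy ENABLE NO_DROP;
    ALTER TABLE dummy DISABLE OFFLINE;
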
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
index 4b84eca..12e1fbe 100644
--- a/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
@@ -66,8 +66,6 @@ Partition Value:    	[2008, 11]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -108,8 +106,6 @@ Partition Value:    	[2008, 12]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -190,8 +186,6 @@ Partition Value:    	[10, 11]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -232,8 +226,6 @@ Partition Value:    	[10, 12]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -323,8 +315,6 @@ Partition Value:    	[1997]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -363,8 +353,6 @@ Partition Value:    	[1994]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -403,8 +391,6 @@ Partition Value:    	[1998]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -443,8 +429,6 @@ Partition Value:    	[1996]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
index 80c3092..2559492 100644
--- a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
@@ -100,8 +100,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -142,8 +140,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -184,8 +180,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -226,8 +220,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -266,7 +258,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -381,8 +372,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -423,8 +412,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -465,8 +452,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -507,8 +492,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
index 105c222..8136c39 100644
--- a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
@@ -47,7 +47,6 @@ a                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -91,7 +90,6 @@ a                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -231,8 +229,6 @@ Partition Value:    	[2008-01-01]
 Database:           	default             	 
 Table:              	anaylyze_external   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -284,8 +280,6 @@ Partition Value:    	[2008-01-01]
 Database:           	default             	 
 Table:              	anaylyze_external   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
index cceceef..cb0920e 100644
--- a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
@@ -229,8 +229,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -272,8 +270,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
index adcf150..eb0145b 100644
--- a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
@@ -75,8 +75,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -150,8 +148,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -192,8 +188,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/statsfs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/statsfs.q.out b/ql/src/test/results/clientpositive/spark/statsfs.q.out
index b0bca41..2735f5f 100644
--- a/ql/src/test/results/clientpositive/spark/statsfs.q.out
+++ b/ql/src/test/results/clientpositive/spark/statsfs.q.out
@@ -65,8 +65,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -106,8 +104,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -187,8 +183,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -228,8 +222,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -298,7 +290,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -364,7 +355,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -457,8 +447,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -499,8 +487,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	t1                  	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
index c7616b4..a4b5836 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
@@ -193,7 +193,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
index 586b88b..d98b388 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
@@ -255,7 +255,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
index 9ddf606..1e7d5cf 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
@@ -245,7 +245,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
index fdaa941..e57626f 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
@@ -245,7 +245,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
index 995d180..bb294cd 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
@@ -271,7 +271,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
index e23ecf4..3bb9194 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
@@ -247,7 +247,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
index 62dd62c..354a48f 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
@@ -217,7 +217,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
index 42aa20a..57059dc 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
@@ -257,7 +257,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
index c6fdfa5..c8047a9 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
@@ -168,7 +168,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
index d732571..a4d1793 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
@@ -227,7 +227,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
index 982dd67..c6e2070 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
@@ -197,7 +197,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
index 3993ac5..d886433 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
@@ -199,7 +199,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
index 13d3b5c..4404b87 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
@@ -203,7 +203,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
index f5c7636..ccefacf 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
@@ -187,7 +187,6 @@ key                 	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
index d00fb89..e53e963 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
@@ -207,7 +207,6 @@ values2             	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
index 3fa6ae0..adb0c62 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
@@ -227,7 +227,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
index c0ce83f..9237316 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
@@ -199,7 +199,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
index 8cd8c8d..6e96186 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
@@ -218,8 +218,6 @@ Partition Value:    	[2004]
 Database:           	default             	 
 Table:              	outputtbl1          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	4                   
@@ -429,8 +427,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	outputtbl2          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	2                   
@@ -624,8 +620,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	outputtbl3          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
index 2126c1d..565b834 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
@@ -189,7 +189,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
index 7dd8e3e..a5730c1 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
@@ -243,7 +243,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
index fe1fd8b..908298d 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
@@ -251,7 +251,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
index 530be3f..5e88d10 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
@@ -197,7 +197,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
index 06adb05..b33767c 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
@@ -203,7 +203,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
index 7857999..e837bd7 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
@@ -259,7 +259,6 @@ values              	bigint
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats1.q.out b/ql/src/test/results/clientpositive/stats1.q.out
index 6f560d3..ac34bbb 100644
--- a/ql/src/test/results/clientpositive/stats1.q.out
+++ b/ql/src/test/results/clientpositive/stats1.q.out
@@ -178,7 +178,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -228,7 +227,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats10.q.out b/ql/src/test/results/clientpositive/stats10.q.out
index dc8aa7f..7824cbd 100644
--- a/ql/src/test/results/clientpositive/stats10.q.out
+++ b/ql/src/test/results/clientpositive/stats10.q.out
@@ -413,8 +413,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	bucket3_1           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -454,8 +452,6 @@ Partition Value:    	[2]
 Database:           	default             	 
 Table:              	bucket3_1           	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -493,7 +489,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats11.q.out b/ql/src/test/results/clientpositive/stats11.q.out
index e51f049..4ed235f 100644
--- a/ql/src/test/results/clientpositive/stats11.q.out
+++ b/ql/src/test/results/clientpositive/stats11.q.out
@@ -86,8 +86,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	srcbucket_mapjoin_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -135,8 +133,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	srcbucket_mapjoin_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
@@ -184,8 +180,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	srcbucket_mapjoin_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	3                   
@@ -233,8 +227,6 @@ Partition Value:    	[2008-04-08]
 Database:           	default             	 
 Table:              	srcbucket_mapjoin_part	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats12.q.out b/ql/src/test/results/clientpositive/stats12.q.out
index d5ca59b..c6e7c68 100644
--- a/ql/src/test/results/clientpositive/stats12.q.out
+++ b/ql/src/test/results/clientpositive/stats12.q.out
@@ -205,7 +205,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -244,8 +243,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -286,8 +283,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -328,8 +323,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -370,8 +363,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats13.q.out b/ql/src/test/results/clientpositive/stats13.q.out
index e34f0b7..7415728 100644
--- a/ql/src/test/results/clientpositive/stats13.q.out
+++ b/ql/src/test/results/clientpositive/stats13.q.out
@@ -155,7 +155,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -194,8 +193,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -236,8 +233,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -278,8 +273,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -320,8 +313,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -368,7 +359,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats14.q.out b/ql/src/test/results/clientpositive/stats14.q.out
index f12b136..f34720d 100644
--- a/ql/src/test/results/clientpositive/stats14.q.out
+++ b/ql/src/test/results/clientpositive/stats14.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats15.q.out b/ql/src/test/results/clientpositive/stats15.q.out
index a60dee2..aad2e3a 100644
--- a/ql/src/test/results/clientpositive/stats15.q.out
+++ b/ql/src/test/results/clientpositive/stats15.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats16.q.out b/ql/src/test/results/clientpositive/stats16.q.out
index 3f0f2ea..2e3cadb 100644
--- a/ql/src/test/results/clientpositive/stats16.q.out
+++ b/ql/src/test/results/clientpositive/stats16.q.out
@@ -24,7 +24,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -73,7 +72,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats18.q.out b/ql/src/test/results/clientpositive/stats18.q.out
index a061846..a7d6ab8 100644
--- a/ql/src/test/results/clientpositive/stats18.q.out
+++ b/ql/src/test/results/clientpositive/stats18.q.out
@@ -44,8 +44,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -94,8 +92,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats19.q.out b/ql/src/test/results/clientpositive/stats19.q.out
index 499a8bd..ea56f3a 100644
--- a/ql/src/test/results/clientpositive/stats19.q.out
+++ b/ql/src/test/results/clientpositive/stats19.q.out
@@ -98,8 +98,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -152,8 +150,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -206,8 +202,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -312,8 +306,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -366,8 +358,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -420,8 +410,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats2.q.out b/ql/src/test/results/clientpositive/stats2.q.out
index 5e305d3..ac1d5cb 100644
--- a/ql/src/test/results/clientpositive/stats2.q.out
+++ b/ql/src/test/results/clientpositive/stats2.q.out
@@ -96,7 +96,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -176,7 +175,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats20.q.out b/ql/src/test/results/clientpositive/stats20.q.out
index 4ac7bc5..d7e52b4 100644
--- a/ql/src/test/results/clientpositive/stats20.q.out
+++ b/ql/src/test/results/clientpositive/stats20.q.out
@@ -39,7 +39,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -89,7 +88,6 @@ ds                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out
index dd3a95b..2afb76e 100644
--- a/ql/src/test/results/clientpositive/stats3.q.out
+++ b/ql/src/test/results/clientpositive/stats3.q.out
@@ -82,7 +82,6 @@ col1                	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -227,7 +226,6 @@ pcol2               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats4.q.out b/ql/src/test/results/clientpositive/stats4.q.out
index 39d5413..9ced932 100644
--- a/ql/src/test/results/clientpositive/stats4.q.out
+++ b/ql/src/test/results/clientpositive/stats4.q.out
@@ -2308,8 +2308,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	nzhang_part1        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2350,8 +2348,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	nzhang_part1        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2392,8 +2388,6 @@ Partition Value:    	[2008-12-31, 11]
 Database:           	default             	 
 Table:              	nzhang_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2434,8 +2428,6 @@ Partition Value:    	[2008-12-31, 12]
 Database:           	default             	 
 Table:              	nzhang_part2        	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -2474,7 +2466,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -2511,7 +2502,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats5.q.out b/ql/src/test/results/clientpositive/stats5.q.out
index 484e834..23d4e6b 100644
--- a/ql/src/test/results/clientpositive/stats5.q.out
+++ b/ql/src/test/results/clientpositive/stats5.q.out
@@ -49,7 +49,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats6.q.out b/ql/src/test/results/clientpositive/stats6.q.out
index b4435f2..a387075 100644
--- a/ql/src/test/results/clientpositive/stats6.q.out
+++ b/ql/src/test/results/clientpositive/stats6.q.out
@@ -79,8 +79,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -121,8 +119,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -163,8 +159,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -205,8 +199,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -245,7 +237,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats7.q.out b/ql/src/test/results/clientpositive/stats7.q.out
index 03b76e2..7f32764 100644
--- a/ql/src/test/results/clientpositive/stats7.q.out
+++ b/ql/src/test/results/clientpositive/stats7.q.out
@@ -90,8 +90,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -132,8 +130,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -172,7 +168,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats8.q.out b/ql/src/test/results/clientpositive/stats8.q.out
index ae632e1..80dd4e8 100644
--- a/ql/src/test/results/clientpositive/stats8.q.out
+++ b/ql/src/test/results/clientpositive/stats8.q.out
@@ -86,8 +86,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -126,7 +124,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -196,8 +193,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -269,8 +264,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -342,8 +335,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -427,8 +418,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -469,8 +458,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -511,8 +498,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -553,8 +538,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -593,7 +576,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats9.q.out b/ql/src/test/results/clientpositive/stats9.q.out
index 8563f3b..e7c7743 100644
--- a/ql/src/test/results/clientpositive/stats9.q.out
+++ b/ql/src/test/results/clientpositive/stats9.q.out
@@ -57,7 +57,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_counter.q.out b/ql/src/test/results/clientpositive/stats_counter.q.out
index e2980e8..8b3dcea 100644
--- a/ql/src/test/results/clientpositive/stats_counter.q.out
+++ b/ql/src/test/results/clientpositive/stats_counter.q.out
@@ -32,7 +32,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -80,7 +79,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
index ab1270c..626dcff 100644
--- a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
@@ -66,8 +66,6 @@ Partition Value:    	[2008, 11]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -108,8 +106,6 @@ Partition Value:    	[2008, 12]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -190,8 +186,6 @@ Partition Value:    	[10, 11]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -232,8 +226,6 @@ Partition Value:    	[10, 12]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -323,8 +315,6 @@ Partition Value:    	[1997]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -363,8 +353,6 @@ Partition Value:    	[1994]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -403,8 +391,6 @@ Partition Value:    	[1998]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -443,8 +429,6 @@ Partition Value:    	[1996]
 Database:           	default             	 
 Table:              	dummy               	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_empty_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out b/ql/src/test/results/clientpositive/stats_empty_partition.q.out
index 65e0a6f..c13817e 100644
--- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out
+++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out
@@ -43,8 +43,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tmptable            	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_invalidation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_invalidation.q.out b/ql/src/test/results/clientpositive/stats_invalidation.q.out
index f8c1731..1bb7dc6 100644
--- a/ql/src/test/results/clientpositive/stats_invalidation.q.out
+++ b/ql/src/test/results/clientpositive/stats_invalidation.q.out
@@ -40,7 +40,6 @@ value               	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -85,7 +84,6 @@ new_col             	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
index 5a05eb4..63372c5 100644
--- a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
@@ -74,8 +74,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	stats_list_bucket   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -151,7 +149,6 @@ c2                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
index 1b26365..8688cee 100644
--- a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
@@ -74,8 +74,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	stats_list_bucket   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	4                   
@@ -151,7 +149,6 @@ c2                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_noscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/stats_noscan_1.q.out
index 80c3092..2559492 100644
--- a/ql/src/test/results/clientpositive/stats_noscan_1.q.out
+++ b/ql/src/test/results/clientpositive/stats_noscan_1.q.out
@@ -100,8 +100,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -142,8 +140,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -184,8 +180,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -226,8 +220,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -266,7 +258,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -381,8 +372,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -423,8 +412,6 @@ Partition Value:    	[2008-04-08, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -465,8 +452,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -507,8 +492,6 @@ Partition Value:    	[2008-04-09, 12]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_noscan_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/stats_noscan_2.q.out
index 105c222..8136c39 100644
--- a/ql/src/test/results/clientpositive/stats_noscan_2.q.out
+++ b/ql/src/test/results/clientpositive/stats_noscan_2.q.out
@@ -47,7 +47,6 @@ a                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -91,7 +90,6 @@ a                   	int
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 
@@ -231,8 +229,6 @@ Partition Value:    	[2008-01-01]
 Database:           	default             	 
 Table:              	anaylyze_external   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -284,8 +280,6 @@ Partition Value:    	[2008-01-01]
 Database:           	default             	 
 Table:              	anaylyze_external   	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_only_null.q.out b/ql/src/test/results/clientpositive/stats_only_null.q.out
index f99b85a..55c5970 100644
--- a/ql/src/test/results/clientpositive/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/stats_only_null.q.out
@@ -217,8 +217,6 @@ Partition Value:    	[2010]
 Database:           	default             	 
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -260,8 +258,6 @@ Partition Value:    	[2011]
 Database:           	default             	 
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_partscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_partscan_1.q.out b/ql/src/test/results/clientpositive/stats_partscan_1.q.out
index b15fbc7..e2a3bbd 100644
--- a/ql/src/test/results/clientpositive/stats_partscan_1.q.out
+++ b/ql/src/test/results/clientpositive/stats_partscan_1.q.out
@@ -74,8 +74,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   
@@ -149,8 +147,6 @@ Partition Value:    	[2008-04-08, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -191,8 +187,6 @@ Partition Value:    	[2008-04-09, 11]
 Database:           	default             	 
 Table:              	analyze_srcpart_partial_scan	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	false               
 	numFiles            	1                   


[26/50] [abbrv] hive git commit: HIVE-11228 - Mutation API should use semi-shared locks. (Elliot West, via Eugene Koifman)

Posted by xu...@apache.org.
HIVE-11228 - Mutation API should use semi-shared locks. (Elliot West, via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3301b92b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3301b92b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3301b92b

Branch: refs/heads/beeline-cli
Commit: 3301b92bcb2a1f779e76d174cd9ac6d83fc66938
Parents: 17f759d
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Mon Jul 13 09:42:07 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Mon Jul 13 09:42:26 2015 -0700

----------------------------------------------------------------------
 .../streaming/mutate/client/MutatorClient.java  |  11 +-
 .../streaming/mutate/client/lock/Lock.java      |  73 +++++++----
 .../hive/hcatalog/streaming/mutate/package.html |   8 +-
 .../streaming/mutate/client/lock/TestLock.java  | 121 ++++++++++++-------
 4 files changed, 136 insertions(+), 77 deletions(-)
----------------------------------------------------------------------
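
For orientation before the diffs: the new Options API separates read intent from write intent. A minimal, hypothetical usage sketch against the builder methods added below (the database, table, and user names here are illustrative only, not from the patch):

  import org.apache.hive.hcatalog.streaming.mutate.client.lock.Lock;

  // Sources receive SHARED_READ lock components; sinks receive
  // SHARED_WRITE (semi-shared) components, as assembled in
  // Lock.buildLockRequest() in this change.
  Lock.Options options = new Lock.Options()
      .addSourceTable("db", "source_one")  // shared read lock
      .addSourceTable("db", "source_two")  // shared read lock
      .addSinkTable("db", "sink")          // semi-shared write lock
      .user("ewest");
  // A Lock built from these options must be acquired with a valid
  // transaction id whenever sinks are present: lock.acquire(txnId).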


http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
index 2724525..29b828d 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
@@ -42,7 +42,16 @@ public class MutatorClient implements Closeable {
         .lockFailureListener(lockFailureListener == null ? LockFailureListener.NULL_LISTENER : lockFailureListener)
         .user(user);
     for (AcidTable table : tables) {
-      lockOptions.addTable(table.getDatabaseName(), table.getTableName());
+      switch (table.getTableType()) {
+      case SOURCE:
+        lockOptions.addSourceTable(table.getDatabaseName(), table.getTableName());
+        break;
+      case SINK:
+        lockOptions.addSinkTable(table.getDatabaseName(), table.getTableName());
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown TableType: " + table.getTableType());
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
index 21604df..ad0b303 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
@@ -2,6 +2,7 @@ package org.apache.hive.hcatalog.streaming.mutate.client.lock;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
@@ -35,7 +36,8 @@ public class Lock {
   private final IMetaStoreClient metaStoreClient;
   private final HeartbeatFactory heartbeatFactory;
   private final LockFailureListener listener;
-  private final Collection<Table> tableDescriptors;
+  private final Collection<Table> sinks;
+  private final Collection<Table> tables = new HashSet<>();
   private final int lockRetries;
   private final int retryWaitSeconds;
   private final String user;
@@ -46,23 +48,26 @@ public class Lock {
   private Long transactionId;
 
   public Lock(IMetaStoreClient metaStoreClient, Options options) {
-    this(metaStoreClient, new HeartbeatFactory(), options.hiveConf, options.listener, options.user,
-        options.descriptors, options.lockRetries, options.retryWaitSeconds);
+    this(metaStoreClient, new HeartbeatFactory(), options.hiveConf, options.listener, options.user, options.sources,
+        options.sinks, options.lockRetries, options.retryWaitSeconds);
   }
 
   /** Visible for testing only. */
   Lock(IMetaStoreClient metaStoreClient, HeartbeatFactory heartbeatFactory, HiveConf hiveConf,
-      LockFailureListener listener, String user, Collection<Table> tableDescriptors, int lockRetries,
+      LockFailureListener listener, String user, Collection<Table> sources, Collection<Table> sinks, int lockRetries,
       int retryWaitSeconds) {
     this.metaStoreClient = metaStoreClient;
     this.heartbeatFactory = heartbeatFactory;
     this.hiveConf = hiveConf;
     this.user = user;
-    this.tableDescriptors = tableDescriptors;
     this.listener = listener;
     this.lockRetries = lockRetries;
     this.retryWaitSeconds = retryWaitSeconds;
 
+    this.sinks = sinks;
+    tables.addAll(sources);
+    tables.addAll(sinks);
+
     if (LockFailureListener.NULL_LISTENER.equals(listener)) {
       LOG.warn("No {} supplied. Data quality and availability cannot be assured.",
           LockFailureListener.class.getSimpleName());
@@ -77,6 +82,9 @@ public class Lock {
 
   /** Attempts to acquire a read lock on the table, returns if successful, throws exception otherwise. */
   public void acquire(long transactionId) throws LockException {
+    if (transactionId <= 0) {
+      throw new IllegalArgumentException("Invalid transaction id: " + transactionId);
+    }
     lockId = internalAcquire(transactionId);
     this.transactionId = transactionId;
     initiateHeartbeat();
@@ -96,19 +104,18 @@ public class Lock {
 
   @Override
   public String toString() {
-    return "Lock [metaStoreClient=" + metaStoreClient + ", lockId=" + lockId + ", transactionId=" + transactionId
-        + "]";
+    return "Lock [metaStoreClient=" + metaStoreClient + ", lockId=" + lockId + ", transactionId=" + transactionId + "]";
   }
 
   private long internalAcquire(Long transactionId) throws LockException {
     int attempts = 0;
-    LockRequest request = buildSharedLockRequest(transactionId);
+    LockRequest request = buildLockRequest(transactionId);
     do {
       LockResponse response = null;
       try {
         response = metaStoreClient.lock(request);
       } catch (TException e) {
-        throw new LockException("Unable to acquire lock for tables: [" + join(tableDescriptors) + "]", e);
+        throw new LockException("Unable to acquire lock for tables: [" + join(tables) + "]", e);
       }
       if (response != null) {
         LockState state = response.getState();
@@ -129,7 +136,7 @@ public class Lock {
       }
       attempts++;
     } while (attempts < lockRetries);
-    throw new LockException("Could not acquire lock on tables: [" + join(tableDescriptors) + "]");
+    throw new LockException("Could not acquire lock on tables: [" + join(tables) + "]");
   }
 
   private void internalRelease() {
@@ -142,18 +149,24 @@ public class Lock {
       }
     } catch (TException e) {
       LOG.error("Lock " + lockId + " failed.", e);
-      listener.lockFailed(lockId, transactionId, asStrings(tableDescriptors), e);
+      listener.lockFailed(lockId, transactionId, asStrings(tables), e);
     }
   }
 
-  private LockRequest buildSharedLockRequest(Long transactionId) {
+  private LockRequest buildLockRequest(Long transactionId) {
+    if (transactionId == null && !sinks.isEmpty()) {
+      throw new IllegalArgumentException("Cannot sink to tables outside of a transaction: sinks=" + asStrings(sinks));
+    }
     LockRequestBuilder requestBuilder = new LockRequestBuilder();
-    for (Table descriptor : tableDescriptors) {
-      LockComponent component = new LockComponentBuilder()
-          .setDbName(descriptor.getDbName())
-          .setTableName(descriptor.getTableName())
-          .setShared()
-          .build();
+    for (Table table : tables) {
+      LockComponentBuilder componentBuilder = new LockComponentBuilder().setDbName(table.getDbName()).setTableName(
+          table.getTableName());
+      if (sinks.contains(table)) {
+        componentBuilder.setSemiShared();
+      } else {
+        componentBuilder.setShared();
+      }
+      LockComponent component = componentBuilder.build();
       requestBuilder.addLockComponent(component);
     }
     if (transactionId != null) {
@@ -166,8 +179,7 @@ public class Lock {
   private void initiateHeartbeat() {
     int heartbeatPeriod = getHeartbeatPeriod();
     LOG.debug("Heartbeat period {}s", heartbeatPeriod);
-    heartbeat = heartbeatFactory.newInstance(metaStoreClient, listener, transactionId, tableDescriptors, lockId,
-        heartbeatPeriod);
+    heartbeat = heartbeatFactory.newInstance(metaStoreClient, listener, transactionId, tables, lockId, heartbeatPeriod);
   }
 
   private int getHeartbeatPeriod() {
@@ -210,22 +222,33 @@ public class Lock {
 
   /** Constructs a lock options for a set of Hive ACID tables from which we wish to read. */
   public static final class Options {
-    Set<Table> descriptors = new LinkedHashSet<>();
+    Set<Table> sources = new LinkedHashSet<>();
+    Set<Table> sinks = new LinkedHashSet<>();
     LockFailureListener listener = LockFailureListener.NULL_LISTENER;
     int lockRetries = 5;
     int retryWaitSeconds = 30;
     String user;
     HiveConf hiveConf;
 
-    /** Adds a table for which a shared read lock will be requested. */
-    public Options addTable(String databaseName, String tableName) {
+    /** Adds a table for which a shared lock will be requested. */
+    public Options addSourceTable(String databaseName, String tableName) {
+      addTable(databaseName, tableName, sources);
+      return this;
+    }
+
+    /** Adds a table for which a semi-shared lock will be requested. */
+    public Options addSinkTable(String databaseName, String tableName) {
+      addTable(databaseName, tableName, sinks);
+      return this;
+    }
+
+    private void addTable(String databaseName, String tableName, Set<Table> tables) {
       checkNotNullOrEmpty(databaseName);
       checkNotNullOrEmpty(tableName);
       Table table = new Table();
       table.setDbName(databaseName);
       table.setTableName(tableName);
-      descriptors.add(table);
-      return this;
+      tables.add(table);
     }
 
     public Options user(String user) {

http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
index 9fc10b6..09a55b6 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
@@ -421,7 +421,7 @@ automatically (say on a hourly basis). In such cases requiring the Hive
 admin to pre-create the necessary partitions may not be reasonable.
 Consequently the API allows coordinators to create partitions as needed
 (see:
-<code>MutatorClientBuilder.addTable(String, String, boolean)</code>
+<code>MutatorClientBuilder.addSinkTable(String, String, boolean)</code>
 ). Partition creation being an atomic action, multiple coordinators can
 race to create the partition, but only one would succeed, so
 coordinators clients need not synchronize when creating a partition. The
@@ -440,14 +440,14 @@ consistent manner requires the following:
 <ol>
 <li>Obtaining a valid transaction list from the meta store (<code>ValidTxnList</code>).
 </li>
-<li>Acquiring a read-lock with the meta store and issuing
-heartbeats (<code>LockImpl</code> can help with this).
+<li>Acquiring a lock with the meta store and issuing heartbeats (<code>LockImpl</code>
+can help with this).
 </li>
 <li>Configuring the <code>OrcInputFormat</code> and then reading
 the data. Make sure that you also pull in the <code>ROW__ID</code>
 values. See: <code>AcidRecordReader.getRecordIdentifier</code>.
 </li>
-<li>Releasing the read-lock.</li>
+<li>Releasing the lock.</li>
 </ol>
 </p>
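
The four numbered steps in the revised documentation map directly onto the Lock class changed above. A rough reader-side sketch following those steps, assuming an already-constructed IMetaStoreClient and Lock.Options (error handling and the input-format wiring are elided):

  import org.apache.hadoop.hive.common.ValidTxnList;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hive.hcatalog.streaming.mutate.client.lock.Lock;

  ValidTxnList validTxns = metaStoreClient.getValidTxns(); // 1. valid txn list
  Lock lock = new Lock(metaStoreClient, options);          // 2. lock + heartbeats
  lock.acquire();                                          //    read-only, so no txn id
  try {
    // 3. Configure OrcInputFormat with validTxns and read the data,
    //    pulling ROW__ID via AcidRecordReader.getRecordIdentifier().
  } finally {
    lock.release();                                        // 4. release the lock
  }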
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
index ef1e80c..05f342b 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
@@ -19,7 +19,9 @@ import static org.mockito.Mockito.when;
 
 import java.net.InetAddress;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.Timer;
 
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -42,14 +44,17 @@ import org.mockito.Captor;
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestLock {
 
-  private static final Table TABLE_1 = createTable("DB", "ONE");
-  private static final Table TABLE_2 = createTable("DB", "TWO");
-  private static final List<Table> TABLES = ImmutableList.of(TABLE_1, TABLE_2);
+  private static final Table SOURCE_TABLE_1 = createTable("DB", "SOURCE_1");
+  private static final Table SOURCE_TABLE_2 = createTable("DB", "SOURCE_2");
+  private static final Table SINK_TABLE = createTable("DB", "SINK");
+  private static final Set<Table> SOURCES = ImmutableSet.of(SOURCE_TABLE_1, SOURCE_TABLE_2);
+  private static final Set<Table> SINKS = ImmutableSet.of(SINK_TABLE);
+  private static final Set<Table> TABLES = ImmutableSet.of(SOURCE_TABLE_1, SOURCE_TABLE_2, SINK_TABLE);
   private static final long LOCK_ID = 42;
   private static final long TRANSACTION_ID = 109;
   private static final String USER = "ewest";
@@ -67,7 +72,8 @@ public class TestLock {
   @Captor
   private ArgumentCaptor<LockRequest> requestCaptor;
 
-  private Lock lock;
+  private Lock readLock;
+  private Lock writeLock;
   private HiveConf configuration = new HiveConf();
 
   @Before
@@ -79,44 +85,57 @@ public class TestLock {
         mockHeartbeatFactory.newInstance(any(IMetaStoreClient.class), any(LockFailureListener.class), any(Long.class),
             any(Collection.class), anyLong(), anyInt())).thenReturn(mockHeartbeat);
 
-    lock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, TABLES, 3, 0);
+    readLock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, SOURCES,
+        Collections.<Table> emptySet(), 3, 0);
+    writeLock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, SOURCES, SINKS,
+        3, 0);
   }
 
   @Test
   public void testAcquireReadLockWithNoIssues() throws Exception {
-    lock.acquire();
-    assertEquals(Long.valueOf(LOCK_ID), lock.getLockId());
-    assertNull(lock.getTransactionId());
+    readLock.acquire();
+    assertEquals(Long.valueOf(LOCK_ID), readLock.getLockId());
+    assertNull(readLock.getTransactionId());
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testAcquireWriteLockWithoutTxn() throws Exception {
+    writeLock.acquire();
+  }
+  
+  @Test(expected = IllegalArgumentException.class)
+  public void testAcquireWriteLockWithInvalidTxn() throws Exception {
+    writeLock.acquire(0);
   }
 
   @Test
   public void testAcquireTxnLockWithNoIssues() throws Exception {
-    lock.acquire(TRANSACTION_ID);
-    assertEquals(Long.valueOf(LOCK_ID), lock.getLockId());
-    assertEquals(Long.valueOf(TRANSACTION_ID), lock.getTransactionId());
+    writeLock.acquire(TRANSACTION_ID);
+    assertEquals(Long.valueOf(LOCK_ID), writeLock.getLockId());
+    assertEquals(Long.valueOf(TRANSACTION_ID), writeLock.getTransactionId());
   }
 
   @Test
   public void testAcquireReadLockCheckHeartbeatCreated() throws Exception {
     configuration.set("hive.txn.timeout", "100s");
-    lock.acquire();
+    readLock.acquire();
 
-    verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), any(Long.class), eq(TABLES),
+    verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), any(Long.class), eq(SOURCES),
         eq(LOCK_ID), eq(75));
   }
 
   @Test
   public void testAcquireTxnLockCheckHeartbeatCreated() throws Exception {
     configuration.set("hive.txn.timeout", "100s");
-    lock.acquire(TRANSACTION_ID);
+    writeLock.acquire(TRANSACTION_ID);
 
-    verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), eq(TRANSACTION_ID), eq(TABLES),
-        eq(LOCK_ID), eq(75));
+    verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), eq(TRANSACTION_ID),
+        eq(TABLES), eq(LOCK_ID), eq(75));
   }
 
   @Test
   public void testAcquireLockCheckUser() throws Exception {
-    lock.acquire();
+    readLock.acquire();
     verify(mockMetaStoreClient).lock(requestCaptor.capture());
     LockRequest actualRequest = requestCaptor.getValue();
     assertEquals(USER, actualRequest.getUser());
@@ -124,7 +143,7 @@ public class TestLock {
 
   @Test
   public void testAcquireReadLockCheckLocks() throws Exception {
-    lock.acquire();
+    readLock.acquire();
     verify(mockMetaStoreClient).lock(requestCaptor.capture());
 
     LockRequest request = requestCaptor.getValue();
@@ -137,17 +156,17 @@ public class TestLock {
     assertEquals(2, components.size());
 
     LockComponent expected1 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
-    expected1.setTablename("ONE");
+    expected1.setTablename("SOURCE_1");
     assertTrue(components.contains(expected1));
 
     LockComponent expected2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
-    expected2.setTablename("TWO");
+    expected2.setTablename("SOURCE_2");
     assertTrue(components.contains(expected2));
   }
 
   @Test
   public void testAcquireTxnLockCheckLocks() throws Exception {
-    lock.acquire(TRANSACTION_ID);
+    writeLock.acquire(TRANSACTION_ID);
     verify(mockMetaStoreClient).lock(requestCaptor.capture());
 
     LockRequest request = requestCaptor.getValue();
@@ -157,73 +176,77 @@ public class TestLock {
 
     List<LockComponent> components = request.getComponent();
 
-    System.out.println(components);
-    assertEquals(2, components.size());
+    assertEquals(3, components.size());
 
     LockComponent expected1 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
-    expected1.setTablename("ONE");
+    expected1.setTablename("SOURCE_1");
     assertTrue(components.contains(expected1));
 
     LockComponent expected2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
-    expected2.setTablename("TWO");
+    expected2.setTablename("SOURCE_2");
     assertTrue(components.contains(expected2));
+
+    LockComponent expected3 = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "DB");
+    expected3.setTablename("SINK");
+    assertTrue(components.contains(expected3));
   }
 
   @Test(expected = LockException.class)
   public void testAcquireLockNotAcquired() throws Exception {
     when(mockLockResponse.getState()).thenReturn(NOT_ACQUIRED);
-    lock.acquire();
+    readLock.acquire();
   }
 
   @Test(expected = LockException.class)
   public void testAcquireLockAborted() throws Exception {
     when(mockLockResponse.getState()).thenReturn(ABORT);
-    lock.acquire();
+    readLock.acquire();
   }
 
   @Test(expected = LockException.class)
   public void testAcquireLockWithWaitRetriesExceeded() throws Exception {
     when(mockLockResponse.getState()).thenReturn(WAITING, WAITING, WAITING);
-    lock.acquire();
+    readLock.acquire();
   }
 
   @Test
   public void testAcquireLockWithWaitRetries() throws Exception {
     when(mockLockResponse.getState()).thenReturn(WAITING, WAITING, ACQUIRED);
-    lock.acquire();
-    assertEquals(Long.valueOf(LOCK_ID), lock.getLockId());
+    readLock.acquire();
+    assertEquals(Long.valueOf(LOCK_ID), readLock.getLockId());
   }
 
   @Test
   public void testReleaseLock() throws Exception {
-    lock.acquire();
-    lock.release();
+    readLock.acquire();
+    readLock.release();
     verify(mockMetaStoreClient).unlock(LOCK_ID);
   }
 
   @Test
   public void testReleaseLockNoLock() throws Exception {
-    lock.release();
+    readLock.release();
     verifyNoMoreInteractions(mockMetaStoreClient);
   }
 
   @Test
   public void testReleaseLockCancelsHeartbeat() throws Exception {
-    lock.acquire();
-    lock.release();
+    readLock.acquire();
+    readLock.release();
     verify(mockHeartbeat).cancel();
   }
 
   @Test
   public void testReadHeartbeat() throws Exception {
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, SOURCES, LOCK_ID);
     task.run();
     verify(mockMetaStoreClient).heartbeat(0, LOCK_ID);
   }
 
   @Test
   public void testTxnHeartbeat() throws Exception {
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
+        LOCK_ID);
     task.run();
     verify(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
   }
@@ -232,43 +255,47 @@ public class TestLock {
   public void testReadHeartbeatFailsNoSuchLockException() throws Exception {
     Throwable t = new NoSuchLockException();
     doThrow(t).when(mockMetaStoreClient).heartbeat(0, LOCK_ID);
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, SOURCES, LOCK_ID);
     task.run();
-    verify(mockListener).lockFailed(LOCK_ID, null, Lock.asStrings(TABLES), t);
+    verify(mockListener).lockFailed(LOCK_ID, null, Lock.asStrings(SOURCES), t);
   }
 
   @Test
   public void testTxnHeartbeatFailsNoSuchLockException() throws Exception {
     Throwable t = new NoSuchLockException();
     doThrow(t).when(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
+        LOCK_ID);
     task.run();
-    verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(TABLES), t);
+    verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(SOURCES), t);
   }
 
   @Test
   public void testHeartbeatFailsNoSuchTxnException() throws Exception {
     Throwable t = new NoSuchTxnException();
     doThrow(t).when(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
+        LOCK_ID);
     task.run();
-    verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(TABLES), t);
+    verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(SOURCES), t);
   }
 
   @Test
   public void testHeartbeatFailsTxnAbortedException() throws Exception {
     Throwable t = new TxnAbortedException();
     doThrow(t).when(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
+        LOCK_ID);
     task.run();
-    verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(TABLES), t);
+    verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(SOURCES), t);
   }
 
   @Test
   public void testHeartbeatContinuesTException() throws Exception {
     Throwable t = new TException();
     doThrow(t).when(mockMetaStoreClient).heartbeat(0, LOCK_ID);
-    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
+    HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
+        LOCK_ID);
     task.run();
     verifyZeroInteractions(mockListener);
   }


[48/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)
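
In short, this removes the protect-mode concept end to end: the ENABLE/DISABLE NO_DROP and OFFLINE grammar in HiveLexer.g and HiveParser.g, the ProtectMode metadata class, the protectmode* and related negative tests, and the "Protect Mode:" row in DESCRIBE FORMATTED output. That last removal accounts for the long tail of .q.out updates in the file list below.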


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d6ec52ee
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d6ec52ee
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d6ec52ee

Branch: refs/heads/beeline-cli
Commit: d6ec52ee094d94377442d96d450575462a9497b7
Parents: 7338d8e
Author: Alan Gates <ga...@hortonworks.com>
Authored: Wed Jul 15 17:23:23 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Wed Jul 15 17:23:23 2015 -0700

----------------------------------------------------------------------
 .../results/positive/external_table_ppd.q.out   |   1 -
 .../positive/hbase_binary_storage_queries.q.out |   2 -
 .../src/test/results/positive/hbase_stats.q.out |   7 --
 .../test/results/positive/hbase_stats2.q.out    |   7 --
 .../test/results/positive/hbase_stats3.q.out    |  12 --
 .../positive/hbase_stats_empty_partition.q.out  |   2 -
 .../SemanticAnalysis/HCatSemanticAnalyzer.java  |   7 +-
 .../hive/hcatalog/api/HCatClientHMSImpl.java    |  14 +--
 .../hadoop/hive/metastore/HiveMetaStore.java    |  11 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   7 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |  57 +++++-----
 .../hadoop/hive/metastore/MetaStoreUtils.java   |  26 -----
 .../hive/metastore/PartitionDropOptions.java    |   6 -
 .../hadoop/hive/metastore/ProtectMode.java      |  97 ----------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  50 ---------
 .../hadoop/hive/ql/hooks/WriteEntity.java       |   6 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  57 +++++-----
 .../hadoop/hive/ql/metadata/Partition.java      |  51 ---------
 .../apache/hadoop/hive/ql/metadata/Table.java   |  65 ++---------
 .../formatting/MetaDataFormatUtils.java         |  24 ++--
 .../hive/ql/parse/DDLSemanticAnalyzer.java      | 112 +++++--------------
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |  31 ++---
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   5 -
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |  38 +------
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |  11 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  42 +------
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |   8 +-
 .../hadoop/hive/ql/plan/AlterTableDesc.java     |  20 ++--
 .../hadoop/hive/ql/plan/DropTableDesc.java      |  26 +----
 .../hadoop/hive/ql/plan/HiveOperation.java      |   2 -
 .../alter_partition_invalidspec.q               |   8 --
 .../clientnegative/alter_partition_nodrop.q     |   9 --
 .../alter_partition_nodrop_table.q              |   9 --
 .../clientnegative/alter_partition_offline.q    |  11 --
 .../clientnegative/drop_table_failure3.q        |  12 --
 .../queries/clientnegative/protectmode_part.q   |  15 ---
 .../queries/clientnegative/protectmode_part1.q  |  21 ----
 .../queries/clientnegative/protectmode_part2.q  |   9 --
 .../clientnegative/protectmode_part_no_drop.q   |  10 --
 .../clientnegative/protectmode_part_no_drop2.q  |  11 --
 .../queries/clientnegative/protectmode_tbl1.q   |   8 --
 .../queries/clientnegative/protectmode_tbl2.q   |  12 --
 .../queries/clientnegative/protectmode_tbl3.q   |  10 --
 .../queries/clientnegative/protectmode_tbl4.q   |  15 ---
 .../queries/clientnegative/protectmode_tbl5.q   |  15 ---
 .../queries/clientnegative/protectmode_tbl6.q   |   8 --
 .../queries/clientnegative/protectmode_tbl7.q   |  13 ---
 .../queries/clientnegative/protectmode_tbl8.q   |  13 ---
 .../clientnegative/protectmode_tbl_no_drop.q    |   9 --
 .../test/queries/clientnegative/sa_fail_hook3.q |   4 -
 .../alter_partition_protect_mode.q              |  26 -----
 .../drop_partitions_ignore_protection.q         |  10 --
 .../test/queries/clientpositive/protectmode.q   |  63 -----------
 .../test/queries/clientpositive/protectmode2.q  |  23 ----
 .../alter_numbuckets_partitioned_table.q.out    |   8 --
 .../results/beelinepositive/create_like.q.out   |   3 -
 .../results/beelinepositive/create_like2.q.out  |   1 -
 .../beelinepositive/create_like_view.q.out      |   4 -
 .../beelinepositive/create_skewed_table1.q.out  |   3 -
 .../results/beelinepositive/create_view.q.out   |  14 ---
 .../create_view_partitioned.q.out               |   3 -
 ql/src/test/results/beelinepositive/ctas.q.out  |   5 -
 .../describe_formatted_view_partitioned.q.out   |   1 -
 .../beelinepositive/describe_table.q.out        |   3 -
 .../test/results/beelinepositive/merge3.q.out   |   1 -
 .../part_inherit_tbl_props.q.out                |   1 -
 .../part_inherit_tbl_props_empty.q.out          |   1 -
 .../part_inherit_tbl_props_with_star.q.out      |   1 -
 .../results/beelinepositive/protectmode2.q.out  |   2 -
 .../test/results/beelinepositive/stats1.q.out   |   2 -
 .../test/results/beelinepositive/stats10.q.out  |   3 -
 .../test/results/beelinepositive/stats11.q.out  |   4 -
 .../test/results/beelinepositive/stats12.q.out  |   5 -
 .../test/results/beelinepositive/stats13.q.out  |   6 -
 .../test/results/beelinepositive/stats14.q.out  |   5 -
 .../test/results/beelinepositive/stats15.q.out  |   5 -
 .../test/results/beelinepositive/stats16.q.out  |   2 -
 .../test/results/beelinepositive/stats18.q.out  |   2 -
 .../test/results/beelinepositive/stats2.q.out   |   2 -
 .../test/results/beelinepositive/stats3.q.out   |   2 -
 .../test/results/beelinepositive/stats4.q.out   |   6 -
 .../test/results/beelinepositive/stats5.q.out   |   1 -
 .../test/results/beelinepositive/stats6.q.out   |   5 -
 .../test/results/beelinepositive/stats7.q.out   |   3 -
 .../test/results/beelinepositive/stats8.q.out   |  10 --
 .../test/results/beelinepositive/stats9.q.out   |   1 -
 .../beelinepositive/stats_empty_partition.q.out |   1 -
 .../clientnegative/alter_file_format.q.out      |   1 -
 .../alter_view_as_select_with_partition.q.out   |   1 -
 .../stats_partialscan_autogether.q.out          |   2 -
 .../clientpositive/alter_file_format.q.out      |  19 ----
 .../clientpositive/alter_merge_stats_orc.q.out  |   8 --
 .../alter_numbuckets_partitioned_table.q.out    |  16 ---
 .../alter_numbuckets_partitioned_table2.q.out   |  27 -----
 ...lter_numbuckets_partitioned_table2_h23.q.out |  27 -----
 ...alter_numbuckets_partitioned_table_h23.q.out |  16 ---
 .../alter_partition_clusterby_sortby.q.out      |   7 --
 .../clientpositive/alter_skewed_table.q.out     |   6 -
 .../clientpositive/alter_table_not_sorted.q.out |   2 -
 .../clientpositive/alter_table_serde2.q.out     |   6 -
 .../clientpositive/alter_view_as_select.q.out   |   3 -
 .../clientpositive/authorization_index.q.out    |   1 -
 .../test/results/clientpositive/bucket5.q.out   |   1 -
 .../create_alter_list_bucketing_table1.q.out    |   7 --
 .../results/clientpositive/create_like.q.out    |   9 --
 .../results/clientpositive/create_like2.q.out   |   1 -
 .../clientpositive/create_like_tbl_props.q.out  |   5 -
 .../clientpositive/create_like_view.q.out       |   4 -
 .../clientpositive/create_or_replace_view.q.out |   5 -
 .../clientpositive/create_skewed_table1.q.out   |   3 -
 .../results/clientpositive/create_view.q.out    |  14 ---
 .../create_view_partitioned.q.out               |   3 -
 .../clientpositive/create_view_translate.q.out  |   2 -
 ql/src/test/results/clientpositive/ctas.q.out   |   5 -
 .../results/clientpositive/ctas_colname.q.out   |   7 --
 .../results/clientpositive/ctas_hadoop20.q.out  |   5 -
 .../ctas_uses_database_location.q.out           |   1 -
 .../clientpositive/database_location.q.out      |   2 -
 .../results/clientpositive/decimal_serde.q.out  |   2 -
 .../clientpositive/default_file_format.q.out    |   5 -
 .../describe_comment_indent.q.out               |   1 -
 .../describe_comment_nonascii.q.out             |   1 -
 .../describe_formatted_view_partitioned.q.out   |   2 -
 .../clientpositive/describe_syntax.q.out        |   6 -
 .../results/clientpositive/describe_table.q.out |   7 --
 .../dynpart_sort_opt_vectorization.q.out        |  32 ------
 .../dynpart_sort_optimization.q.out             |  32 ------
 .../dynpart_sort_optimization2.q.out            |  24 ----
 .../encrypted/encryption_insert_values.q.out    |   1 -
 .../clientpositive/exim_hidden_files.q.out      |   1 -
 .../clientpositive/index_skewtable.q.out        |   1 -
 .../clientpositive/infer_bucket_sort.q.out      |  50 ---------
 .../infer_bucket_sort_bucketed_table.q.out      |   2 -
 .../infer_bucket_sort_convert_join.q.out        |   4 -
 .../infer_bucket_sort_dyn_part.q.out            |  16 ---
 .../infer_bucket_sort_grouping_operators.q.out  |  12 --
 .../infer_bucket_sort_list_bucket.q.out         |   4 -
 .../infer_bucket_sort_map_operators.q.out       |   8 --
 .../infer_bucket_sort_merge.q.out               |   4 -
 .../infer_bucket_sort_multi_insert.q.out        |  16 ---
 .../infer_bucket_sort_num_buckets.q.out         |   4 -
 .../infer_bucket_sort_reducers_power_two.q.out  |  12 --
 .../results/clientpositive/lb_fs_stats.q.out    |   2 -
 .../clientpositive/list_bucket_dml_1.q.out      |   4 -
 .../list_bucket_dml_10.q.java1.7.out            |   2 -
 .../list_bucket_dml_10.q.java1.8.out            |   2 -
 .../list_bucket_dml_11.q.java1.7.out            |   2 -
 .../list_bucket_dml_11.q.java1.8.out            |   2 -
 .../list_bucket_dml_12.q.java1.7.out            |   2 -
 .../list_bucket_dml_12.q.java1.8.out            |   2 -
 .../list_bucket_dml_13.q.java1.7.out            |   2 -
 .../list_bucket_dml_13.q.java1.8.out            |   2 -
 .../clientpositive/list_bucket_dml_14.q.out     |   1 -
 .../list_bucket_dml_2.q.java1.7.out             |   2 -
 .../list_bucket_dml_2.q.java1.8.out             |   2 -
 .../clientpositive/list_bucket_dml_3.q.out      |   2 -
 .../list_bucket_dml_4.q.java1.7.out             |   4 -
 .../list_bucket_dml_4.q.java1.8.out             |   4 -
 .../list_bucket_dml_5.q.java1.7.out             |   4 -
 .../list_bucket_dml_5.q.java1.8.out             |   4 -
 .../list_bucket_dml_6.q.java1.7.out             |   8 --
 .../list_bucket_dml_6.q.java1.8.out             |   8 --
 .../clientpositive/list_bucket_dml_7.q.out      |   8 --
 .../list_bucket_dml_8.q.java1.7.out             |   6 -
 .../list_bucket_dml_8.q.java1.8.out             |   6 -
 .../list_bucket_dml_9.q.java1.7.out             |   4 -
 .../list_bucket_dml_9.q.java1.8.out             |   4 -
 .../list_bucket_query_multiskew_1.q.out         |   2 -
 .../list_bucket_query_multiskew_2.q.out         |   2 -
 .../list_bucket_query_multiskew_3.q.out         |   6 -
 .../list_bucket_query_oneskew_1.q.out           |   2 -
 .../list_bucket_query_oneskew_2.q.out           |   2 -
 .../list_bucket_query_oneskew_3.q.out           |   2 -
 ql/src/test/results/clientpositive/merge3.q.out |   1 -
 .../results/clientpositive/orc_analyze.q.out    |  48 --------
 .../results/clientpositive/orc_create.q.out     |   6 -
 .../clientpositive/parallel_orderby.q.out       |   2 -
 .../parquet_array_null_element.q.out            |   1 -
 .../results/clientpositive/parquet_create.q.out |   1 -
 .../clientpositive/parquet_partitioned.q.out    |   1 -
 .../results/clientpositive/parquet_serde.q.out  |   5 -
 .../clientpositive/part_inherit_tbl_props.q.out |   2 -
 .../part_inherit_tbl_props_empty.q.out          |   2 -
 .../part_inherit_tbl_props_with_star.q.out      |   2 -
 .../partition_coltype_literals.q.out            |  16 ---
 .../results/clientpositive/protectmode2.q.out   |   2 -
 .../clientpositive/rcfile_default_format.q.out  |   8 --
 .../clientpositive/selectDistinctStar.q.out     |   2 -
 .../spark/alter_merge_stats_orc.q.out           |   8 --
 .../results/clientpositive/spark/bucket5.q.out  |   1 -
 .../results/clientpositive/spark/ctas.q.out     |   5 -
 .../infer_bucket_sort_bucketed_table.q.out      |   2 -
 .../spark/infer_bucket_sort_convert_join.q.out  |   4 -
 .../spark/infer_bucket_sort_map_operators.q.out |   8 --
 .../spark/infer_bucket_sort_merge.q.out         |   4 -
 .../spark/infer_bucket_sort_num_buckets.q.out   |   4 -
 .../infer_bucket_sort_reducers_power_two.q.out  |  12 --
 .../spark/list_bucket_dml_10.q.java1.7.out      |   2 -
 .../spark/list_bucket_dml_10.q.java1.8.out      |   2 -
 .../spark/list_bucket_dml_2.q.java1.7.out       |   2 -
 .../spark/list_bucket_dml_2.q.java1.8.out       |   2 -
 .../spark/list_bucket_dml_2.q.out               | Bin 28747 -> 28667 bytes
 .../clientpositive/spark/orc_analyze.q.out      |  22 ----
 .../clientpositive/spark/parallel_orderby.q.out |   2 -
 .../results/clientpositive/spark/stats1.q.out   |   2 -
 .../results/clientpositive/spark/stats10.q.out  |   5 -
 .../results/clientpositive/spark/stats12.q.out  |   9 --
 .../results/clientpositive/spark/stats13.q.out  |  10 --
 .../results/clientpositive/spark/stats14.q.out  |   7 --
 .../results/clientpositive/spark/stats15.q.out  |   7 --
 .../results/clientpositive/spark/stats16.q.out  |   2 -
 .../results/clientpositive/spark/stats18.q.out  |   4 -
 .../results/clientpositive/spark/stats2.q.out   |   2 -
 .../results/clientpositive/spark/stats20.q.out  |   2 -
 .../results/clientpositive/spark/stats3.q.out   |   2 -
 .../results/clientpositive/spark/stats5.q.out   |   1 -
 .../results/clientpositive/spark/stats6.q.out   |   9 --
 .../results/clientpositive/spark/stats7.q.out   |   5 -
 .../results/clientpositive/spark/stats8.q.out   |  18 ---
 .../results/clientpositive/spark/stats9.q.out   |   1 -
 .../clientpositive/spark/stats_counter.q.out    |   2 -
 .../spark/stats_counter_partitioned.q.out       |  16 ---
 .../clientpositive/spark/stats_noscan_1.q.out   |  17 ---
 .../clientpositive/spark/stats_noscan_2.q.out   |   6 -
 .../clientpositive/spark/stats_only_null.q.out  |   4 -
 .../spark/stats_partscan_1_23.q.out             |   6 -
 .../results/clientpositive/spark/statsfs.q.out  |  14 ---
 .../clientpositive/spark/union_remove_1.q.out   |   1 -
 .../clientpositive/spark/union_remove_10.q.out  |   1 -
 .../clientpositive/spark/union_remove_11.q.out  |   1 -
 .../clientpositive/spark/union_remove_12.q.out  |   1 -
 .../clientpositive/spark/union_remove_13.q.out  |   1 -
 .../clientpositive/spark/union_remove_14.q.out  |   1 -
 .../clientpositive/spark/union_remove_15.q.out  |   1 -
 .../clientpositive/spark/union_remove_16.q.out  |   1 -
 .../clientpositive/spark/union_remove_17.q.out  |   1 -
 .../clientpositive/spark/union_remove_18.q.out  |   1 -
 .../clientpositive/spark/union_remove_19.q.out  |   1 -
 .../clientpositive/spark/union_remove_2.q.out   |   1 -
 .../clientpositive/spark/union_remove_20.q.out  |   1 -
 .../clientpositive/spark/union_remove_21.q.out  |   1 -
 .../clientpositive/spark/union_remove_22.q.out  |   1 -
 .../clientpositive/spark/union_remove_23.q.out  |   1 -
 .../clientpositive/spark/union_remove_24.q.out  |   1 -
 .../clientpositive/spark/union_remove_25.q.out  |   6 -
 .../clientpositive/spark/union_remove_3.q.out   |   1 -
 .../clientpositive/spark/union_remove_4.q.out   |   1 -
 .../clientpositive/spark/union_remove_5.q.out   |   1 -
 .../clientpositive/spark/union_remove_7.q.out   |   1 -
 .../clientpositive/spark/union_remove_8.q.out   |   1 -
 .../clientpositive/spark/union_remove_9.q.out   |   1 -
 ql/src/test/results/clientpositive/stats1.q.out |   2 -
 .../test/results/clientpositive/stats10.q.out   |   5 -
 .../test/results/clientpositive/stats11.q.out   |   8 --
 .../test/results/clientpositive/stats12.q.out   |   9 --
 .../test/results/clientpositive/stats13.q.out   |  10 --
 .../test/results/clientpositive/stats14.q.out   |   7 --
 .../test/results/clientpositive/stats15.q.out   |   7 --
 .../test/results/clientpositive/stats16.q.out   |   2 -
 .../test/results/clientpositive/stats18.q.out   |   4 -
 .../test/results/clientpositive/stats19.q.out   |  12 --
 ql/src/test/results/clientpositive/stats2.q.out |   2 -
 .../test/results/clientpositive/stats20.q.out   |   2 -
 ql/src/test/results/clientpositive/stats3.q.out |   2 -
 ql/src/test/results/clientpositive/stats4.q.out |  10 --
 ql/src/test/results/clientpositive/stats5.q.out |   1 -
 ql/src/test/results/clientpositive/stats6.q.out |   9 --
 ql/src/test/results/clientpositive/stats7.q.out |   5 -
 ql/src/test/results/clientpositive/stats8.q.out |  18 ---
 ql/src/test/results/clientpositive/stats9.q.out |   1 -
 .../results/clientpositive/stats_counter.q.out  |   2 -
 .../stats_counter_partitioned.q.out             |  16 ---
 .../clientpositive/stats_empty_partition.q.out  |   2 -
 .../clientpositive/stats_invalidation.q.out     |   2 -
 .../stats_list_bucket.q.java1.7.out             |   3 -
 .../stats_list_bucket.q.java1.8.out             |   3 -
 .../results/clientpositive/stats_noscan_1.q.out |  17 ---
 .../results/clientpositive/stats_noscan_2.q.out |   6 -
 .../clientpositive/stats_only_null.q.out        |   4 -
 .../clientpositive/stats_partscan_1.q.out       |   6 -
 .../clientpositive/stats_partscan_1_23.q.out    |   6 -
 .../test/results/clientpositive/statsfs.q.out   |  14 ---
 .../tez/alter_merge_stats_orc.q.out             |   8 --
 .../test/results/clientpositive/tez/ctas.q.out  |   5 -
 .../tez/dynpart_sort_opt_vectorization.q.out    |  32 ------
 .../tez/dynpart_sort_optimization.q.out         |  32 ------
 .../tez/dynpart_sort_optimization2.q.out        |  24 ----
 .../clientpositive/tez/orc_analyze.q.out        |  48 --------
 .../clientpositive/tez/selectDistinctStar.q.out |   2 -
 .../clientpositive/tez/stats_counter.q.out      |   2 -
 .../tez/stats_counter_partitioned.q.out         |  16 ---
 .../clientpositive/tez/stats_noscan_1.q.out     |  17 ---
 .../clientpositive/tez/stats_only_null.q.out    |   4 -
 .../results/clientpositive/tez/tez_fsstat.q.out |   2 -
 .../clientpositive/truncate_column.q.out        |  11 --
 .../clientpositive/unicode_notation.q.out       |   3 -
 .../results/clientpositive/union_remove_1.q.out |   1 -
 .../clientpositive/union_remove_10.q.out        |   1 -
 .../clientpositive/union_remove_11.q.out        |   1 -
 .../clientpositive/union_remove_12.q.out        |   1 -
 .../clientpositive/union_remove_13.q.out        |   1 -
 .../clientpositive/union_remove_14.q.out        |   1 -
 .../clientpositive/union_remove_15.q.out        |   1 -
 .../clientpositive/union_remove_16.q.out        |   1 -
 .../clientpositive/union_remove_17.q.out        |   1 -
 .../clientpositive/union_remove_18.q.out        |   1 -
 .../clientpositive/union_remove_19.q.out        |   1 -
 .../results/clientpositive/union_remove_2.q.out |   1 -
 .../clientpositive/union_remove_20.q.out        |   1 -
 .../clientpositive/union_remove_21.q.out        |   1 -
 .../clientpositive/union_remove_22.q.out        |   1 -
 .../clientpositive/union_remove_23.q.out        |   1 -
 .../clientpositive/union_remove_24.q.out        |   1 -
 .../clientpositive/union_remove_25.q.out        |   6 -
 .../results/clientpositive/union_remove_3.q.out |   1 -
 .../results/clientpositive/union_remove_4.q.out |   1 -
 .../results/clientpositive/union_remove_5.q.out |   1 -
 .../results/clientpositive/union_remove_7.q.out |   1 -
 .../results/clientpositive/union_remove_8.q.out |   1 -
 .../results/clientpositive/union_remove_9.q.out |   1 -
 320 files changed, 150 insertions(+), 2473 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/external_table_ppd.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/external_table_ppd.q.out b/hbase-handler/src/test/results/positive/external_table_ppd.q.out
index 57424ce..83eb2f5 100644
--- a/hbase-handler/src/test/results/positive/external_table_ppd.q.out
+++ b/hbase-handler/src/test/results/positive/external_table_ppd.q.out
@@ -52,7 +52,6 @@ boolean_col         	boolean             	from deserializer
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out b/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
index 578ddb2..f212331 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
@@ -52,7 +52,6 @@ boolean_col         	boolean             	from deserializer
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -228,7 +227,6 @@ boolean_col         	boolean             	from deserializer
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats.q.out b/hbase-handler/src/test/results/positive/hbase_stats.q.out
index f12b136..f34720d 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats2.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats2.q.out b/hbase-handler/src/test/results/positive/hbase_stats2.q.out
index a60dee2..aad2e3a 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats2.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats2.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats3.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats3.q.out b/hbase-handler/src/test/results/positive/hbase_stats3.q.out
index 114847c..063800f 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats3.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats3.q.out
@@ -40,8 +40,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -94,8 +92,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -148,8 +144,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -206,8 +200,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -260,8 +252,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -314,8 +304,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out b/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
index 65e0a6f..c13817e 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
@@ -43,8 +43,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tmptable            	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 11d0743..18bf172 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -18,9 +18,6 @@
  */
 package org.apache.hive.hcatalog.cli.SemanticAnalysis;
 
-import java.io.Serializable;
-import java.util.List;
-
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -50,6 +47,9 @@ import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.hcatalog.common.ErrorType;
 import org.apache.hive.hcatalog.common.HCatException;
 
+import java.io.Serializable;
+import java.util.List;
+
 public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
 
   private AbstractSemanticAnalyzerHook hook;
@@ -237,7 +237,6 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
           case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION:
           case HiveParser.TOK_ALTERTABLE_SKEWED:
           case HiveParser.TOK_ALTERTABLE_FILEFORMAT:
-          case HiveParser.TOK_ALTERTABLE_PROTECTMODE:
           case HiveParser.TOK_ALTERTABLE_LOCATION:
           case HiveParser.TOK_ALTERTABLE_MERGEFILES:
           case HiveParser.TOK_ALTERTABLE_RENAMEPART:

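The hunk above also drops the TOK_ALTERTABLE_PROTECTMODE arm from HCat's authorization dispatch, since the token itself disappears from the grammar. For third-party semantic-analyzer hooks built on the same shape, a minimal hedged sketch of the surviving dispatch (needsTableAuth is a hypothetical name; the case list is abridged from the switch above):

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.HiveParser;

    class AuthDispatchSketch {
      // True when the ALTER TABLE token should go through table-level write
      // authorization; the TOK_ALTERTABLE_PROTECTMODE arm is gone because the
      // token no longer exists in HiveParser after this change.
      static boolean needsTableAuth(ASTNode ast) {
        switch (ast.getToken().getType()) {
          case HiveParser.TOK_ALTERTABLE_FILEFORMAT:
          case HiveParser.TOK_ALTERTABLE_LOCATION:
          case HiveParser.TOK_ALTERTABLE_MERGEFILES:
          case HiveParser.TOK_ALTERTABLE_RENAMEPART:
            return true;
          default:
            return false;
        }
      }
    }
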
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
index 3a69581..41571fc 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
@@ -18,13 +18,6 @@
  */
 package org.apache.hive.hcatalog.api;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -76,6 +69,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 /**
  * The HCatClientHMSImpl is the Hive Metastore client based implementation of
@@ -588,7 +587,6 @@ public class HCatClientHMSImpl extends HCatClient {
             Utilities.serializeExpressionToKryo(partitionExpression));
     hmsClient.dropPartitions(table.getDbName(), table.getTableName(), Arrays.asList(serializedPartitionExpression),
         deleteData && !isExternal(table),  // Delete data?
-        false,                             // Ignore Protection?
         ifExists,                          // Fail if table doesn't exist?
         false);                            // Need results back?
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 2ef5aa0..0edf11f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -230,7 +230,9 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
 import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.*;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
 
 /**
  * TODO:pc remove application logic to a separate interface.
@@ -2774,10 +2776,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         for (Partition part : parts) {
-          if (!ignoreProtection && !MetaStoreUtils.canDropPartition(tbl, part)) {
-            throw new MetaException("Table " + tbl.getTableName()
-                + " Partition " + part + " is protected from being dropped");
-          }
+
+          // TODO - we need to speed this up for the normal path where all partitions are under
+          // the table and we don't have to stat every partition
 
           firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
           if (colNames != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index a5f5053..66fbfe4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -842,7 +842,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
     rps.setExprs(exprs);
     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
     req.setDeleteData(options.deleteData);
-    req.setIgnoreProtection(options.ignoreProtection);
     req.setNeedResult(options.returnResults);
     req.setIfExists(options.ifExists);
     if (options.purgeData) {
@@ -854,13 +853,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
 
   @Override
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
 
     return dropPartitions(dbName, tblName, partExprs,
                           PartitionDropOptions.instance()
                                               .deleteData(deleteData)
-                                              .ignoreProtection(ignoreProtection)
                                               .ifExists(ifExists)
                                               .returnResults(needResult));
 
@@ -868,13 +866,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
 
   @Override
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
     // By default, we need the results from dropPartitions();
     return dropPartitions(dbName, tblName, partExprs,
                           PartitionDropOptions.instance()
                                               .deleteData(deleteData)
-                                              .ignoreProtection(ignoreProtection)
                                               .ifExists(ifExists));
   }
 

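With the boolean removed, both remaining convenience overloads funnel into the PartitionDropOptions variant. A minimal sketch of a caller using that variant directly, assuming a connected client; the class name and the db/tbl/partExprs parameters are stand-ins, not code from this patch:

    import java.util.List;

    import org.apache.hadoop.hive.common.ObjectPair;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.PartitionDropOptions;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TException;

    class DropViaOptions {
      static List<Partition> drop(IMetaStoreClient client, String db, String tbl,
          List<ObjectPair<Integer, byte[]>> partExprs) throws TException {
        // One options object instead of a growing list of positional booleans.
        return client.dropPartitions(db, tbl, partExprs,
            PartitionDropOptions.instance()
                .deleteData(true)
                .ifExists(true)
                .returnResults(true));
      }
    }
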
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 341b0ca..147ffcc 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -19,50 +19,30 @@
 package org.apache.hadoop.hive.metastore;
 
 
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.thrift.TException;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FireEventRequest;
+import org.apache.hadoop.hive.metastore.api.FireEventResponse;
 import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -70,8 +50,15 @@ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
@@ -79,10 +66,20 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Wrapper around hive metastore thrift api
@@ -684,11 +681,11 @@ public interface IMetaStoreClient {
                         PartitionDropOptions options) throws TException;
 
   List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists) throws NoSuchObjectException, MetaException, TException;
 
   List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists, boolean needResults) throws NoSuchObjectException, MetaException, TException;
 
   /**

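For code compiled against the old interface, the migration is mechanical: delete the positional ignoreProtection boolean. A hedged before/after sketch (the class name and parameters are stand-ins; the exception clause works because the thrift-generated NoSuchObjectException and MetaException both extend TException):

    import java.util.List;

    import org.apache.hadoop.hive.common.ObjectPair;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TException;

    class DropPartitionsMigration {
      static List<Partition> drop(IMetaStoreClient client, String db, String tbl,
          List<ObjectPair<Integer, byte[]>> exprs) throws TException {
        // Old call shape (no longer compiles): the second boolean was ignoreProtection.
        //   client.dropPartitions(db, tbl, exprs, true, false, true, true);
        return client.dropPartitions(db, tbl, exprs,
            true  /* deleteData */,
            true  /* ifExists */,
            true  /* needResults */);
      }
    }
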
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 38dc406..907cbbf 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1604,32 +1604,6 @@ public class MetaStoreUtils {
     return null;
   }
 
-  public static ProtectMode getProtectMode(Partition partition) {
-    return getProtectMode(partition.getParameters());
-  }
-
-  public static ProtectMode getProtectMode(Table table) {
-    return getProtectMode(table.getParameters());
-  }
-
-  private static ProtectMode getProtectMode(Map<String, String> parameters) {
-    if (parameters == null) {
-      return null;
-    }
-
-    if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
-      return new ProtectMode();
-    } else {
-      return ProtectMode.getProtectModeFromString(parameters.get(ProtectMode.PARAMETER_NAME));
-    }
-  }
-
-  public static boolean canDropPartition(Table table, Partition partition) {
-    ProtectMode mode = getProtectMode(partition);
-    ProtectMode parentMode = getProtectMode(table);
-    return (!mode.noDrop && !mode.offline && !mode.readOnly && !parentMode.noDropCascade);
-  }
-
   public static String ARCHIVING_LEVEL = "archiving_level";
   public static int getArchivingLevel(Partition part) throws MetaException {
     if (!isArchived(part)) {

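The predicate deleted here is small enough to inline anywhere that still has to honor legacy PROTECT_MODE strings left behind in table or partition parameters. A self-contained, hedged stand-in (the class and method names are hypothetical; the flag names and the drop rule are copied from the removed code):

    import java.util.Map;

    final class LegacyProtection {
      private static final String PARAMETER_NAME = "PROTECT_MODE";

      // True when the comma-separated PROTECT_MODE value contains the given flag.
      // An absent parameter means "no protection", matching the removed
      // getProtectMode(), which returned an all-false ProtectMode in that case.
      static boolean hasFlag(Map<String, String> params, String flag) {
        String value = (params == null) ? null : params.get(PARAMETER_NAME);
        if (value == null) {
          return false;
        }
        for (String token : value.split(",")) {
          if (token.equalsIgnoreCase(flag)) {
            return true;
          }
        }
        return false;
      }

      // Same rule as the removed canDropPartition: the partition carries none of
      // NO_DROP/OFFLINE/READ_ONLY, and the table is not NO_DROP_CASCADE.
      static boolean canDropPartition(Map<String, String> tableParams,
                                      Map<String, String> partParams) {
        return !hasFlag(partParams, "NO_DROP")
            && !hasFlag(partParams, "OFFLINE")
            && !hasFlag(partParams, "READ_ONLY")
            && !hasFlag(tableParams, "NO_DROP_CASCADE");
      }
    }
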
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
index 5b2811f..e8ffbd5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
@@ -24,7 +24,6 @@ package org.apache.hadoop.hive.metastore;
 public class PartitionDropOptions {
 
   public boolean deleteData = true;
-  public boolean ignoreProtection = false;
   public boolean ifExists = false;
   public boolean returnResults = true;
   public boolean purgeData = false;
@@ -36,11 +35,6 @@ public class PartitionDropOptions {
     return this;
   }
 
-  public PartitionDropOptions ignoreProtection(boolean ignoreProtection) {
-    this.ignoreProtection = ignoreProtection;
-    return this;
-  }
-
   public PartitionDropOptions ifExists(boolean ifExists) {
     this.ifExists = ifExists;
     return this;

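After the removal, the builder carries exactly four knobs, all visible in this file and exercised elsewhere in the diff (returnResults in HiveMetaStoreClient, purgeData in DDLTask). A minimal usage sketch; the wrapper class is hypothetical:

    import org.apache.hadoop.hive.metastore.PartitionDropOptions;

    class DropOptionsDefaults {
      static PartitionDropOptions sketch() {
        return PartitionDropOptions.instance()
            .deleteData(true)       // remove the files, not just the metadata
            .ifExists(true)         // tolerate already-missing partitions
            .returnResults(false)   // skip shipping dropped Partition objects back
            .purgeData(false);      // trash the data instead of purging it
      }
    }

Each setter returns this, so any subset can be chained onto instance() and the untouched fields keep the defaults declared above.
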
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java
deleted file mode 100644
index b8f1390..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-
-public class ProtectMode {
-  public static String PARAMETER_NAME = "PROTECT_MODE";
-
-  public static String FLAG_OFFLINE = "OFFLINE";
-  public static String FLAG_NO_DROP = "NO_DROP";
-  public static String FLAG_NO_DROP_CASCADE = "NO_DROP_CASCADE";
-  public static String FLAG_READ_ONLY = "READ_ONLY";
-
-  public boolean offline = false;
-  public boolean readOnly = false;
-  public boolean noDrop = false;
-  public boolean noDropCascade = false;
-
-  static public ProtectMode getProtectModeFromString(String sourceString) {
-    return new ProtectMode(sourceString);
-  }
-
-  private ProtectMode(String sourceString) {
-    String[] tokens = sourceString.split(",");
-    for (String token: tokens) {
-      if (token.equalsIgnoreCase(FLAG_OFFLINE)) {
-        offline = true;
-      } else if (token.equalsIgnoreCase(FLAG_NO_DROP)) {
-        noDrop = true;
-      } else if (token.equalsIgnoreCase(FLAG_NO_DROP_CASCADE)) {
-        noDropCascade = true;
-      } else if (token.equalsIgnoreCase(FLAG_READ_ONLY)) {
-        readOnly = true;
-      }
-    }
-  }
-
-  public ProtectMode() {
-  }
-
-  @Override
-  public String toString() {
-    String retString = null;
-
-    if (offline) {
-        retString = FLAG_OFFLINE;
-    }
-
-    if (noDrop) {
-      if (retString != null) {
-        retString = retString + "," + FLAG_NO_DROP;
-      }
-      else
-      {
-        retString = FLAG_NO_DROP;
-      }
-    }
-
-    if (noDropCascade) {
-      if (retString != null) {
-        retString = retString + "," + FLAG_NO_DROP_CASCADE;
-      }
-      else
-      {
-        retString = FLAG_NO_DROP_CASCADE;
-      }
-    }
-
-    if (readOnly) {
-      if (retString != null) {
-        retString = retString + "," + FLAG_READ_ONLY;
-      }
-      else
-      {
-        retString = FLAG_READ_ONLY;
-      }
-    }
-
-    return retString;
-  }
-}

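Most of the 97 deleted lines are the hand-rolled toString() that joins the active flags with commas. For anyone who still needs to emit a legacy value, a compact equivalent; String.join is Java 8, an assumption of this sketch rather than a requirement of the branch:

    import java.util.ArrayList;
    import java.util.List;

    class LegacyProtectModeFormat {
      // Mirrors the deleted toString(): same flag order, and null when no flag
      // is set (which the old setProtectMode used to drop the parameter).
      static String serialize(boolean offline, boolean noDrop,
          boolean noDropCascade, boolean readOnly) {
        List<String> flags = new ArrayList<String>();
        if (offline)       { flags.add("OFFLINE"); }
        if (noDrop)        { flags.add("NO_DROP"); }
        if (noDropCascade) { flags.add("NO_DROP_CASCADE"); }
        if (readOnly)      { flags.add("READ_ONLY"); }
        return flags.isEmpty() ? null : String.join(",", flags);
      }
    }
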
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index a8c6aca..734742c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
@@ -3210,17 +3209,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return builder;
   }
 
-  private void setAlterProtectMode(boolean protectModeEnable,
-      AlterTableDesc.ProtectModeType protectMode,
-      ProtectMode mode) {
-    if (protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
-      mode.offline = protectModeEnable;
-    } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
-      mode.noDrop = protectModeEnable;
-    } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) {
-      mode.noDropCascade = protectModeEnable;
-    }
-  }
   /**
    * Alter a given table.
    *
@@ -3453,20 +3441,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       if (alterTbl.getSerdeName() != null) {
         sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName());
       }
-    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
-      boolean protectModeEnable = alterTbl.isProtectModeEnable();
-      AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();
-
-      ProtectMode mode = null;
-      if (part != null) {
-        mode = part.getProtectMode();
-        setAlterProtectMode(protectModeEnable, protectMode, mode);
-        part.setProtectMode(mode);
-      } else {
-        mode = tbl.getProtectMode();
-        setAlterProtectMode(protectModeEnable,protectMode, mode);
-        tbl.setProtectMode(mode);
-      }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
       StorageDescriptor sd = (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
       // validate sort columns and bucket columns
@@ -3635,7 +3609,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
                             dropTbl.getPartSpecs(),
                             PartitionDropOptions.instance()
                                                 .deleteData(true)
-                                                .ignoreProtection(dropTbl.getIgnoreProtection())
                                                 .ifExists(true)
                                                 .purgeData(dropTbl.getIfPurge()));
     for (Partition partition : droppedParts) {
@@ -3666,11 +3639,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       }
     }
 
-    if (tbl != null && !tbl.canDrop()) {
-      throw new HiveException("Table " + tbl.getTableName() +
-          " is protected from being dropped");
-    }
-
     ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
     if ((tbl!= null) && replicationSpec.isInReplicationScope()){
       /**
@@ -3714,24 +3682,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     int partitionBatchSize = HiveConf.getIntVar(conf,
         ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
 
-    // We should check that all the partitions of the table can be dropped
-    if (tbl != null && tbl.isPartitioned()) {
-      List<String> partitionNames = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short)-1);
-
-      for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) {
-        List<String> partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize,
-            partitionNames.size()));
-        List<Partition> listPartitions = db.getPartitionsByNames(tbl, partNames);
-        for (Partition p: listPartitions) {
-          if (!p.canDrop()) {
-            throw new HiveException("Table " + tbl.getTableName() +
-                " Partition" + p.getName() +
-                " is protected from being dropped");
-          }
-        }
-      }
-    }
-
     // drop the table
     db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
     if (tbl != null) {

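Besides the protect-mode guards, the second deleted block shows the metastore batching idiom: walk partition names in subList windows of METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX instead of fetching everything at once. The idiom itself survives elsewhere in Hive; extracted as a hedged generic sketch (the class name and the Java 8 Consumer are this sketch's assumptions):

    import java.util.List;
    import java.util.function.Consumer;

    final class Batches {
      // Visit a list in fixed-size subList windows, as the deleted loop did with
      // partition names; batchSize would come from
      // ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX in the original.
      static <T> void forEachBatch(List<T> items, int batchSize, Consumer<List<T>> body) {
        for (int i = 0; i < items.size(); i += batchSize) {
          body.accept(items.subList(i, Math.min(i + batchSize, items.size())));
        }
      }
    }

Each window is a subList view over the backing list, so callers should consume it before mutating the original.
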
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index 968c1e1..298e7f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
-import java.io.Serializable;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
@@ -29,6 +27,8 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 
+import java.io.Serializable;
+
 /**
  * This class encapsulates an object that is being written to by the query. This
  * object may be a table, partition, dfs directory or a local directory.
@@ -193,8 +193,6 @@ public class WriteEntity extends Entity implements Serializable {
       case REPLACECOLS:
       case ARCHIVE:
       case UNARCHIVE:
-      case ALTERPROTECTMODE:
-      case ALTERPARTITIONPROTECTMODE:
       case ALTERLOCATION:
       case DROPPARTITION:
       case RENAMEPARTITION:

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index d89aafc..00125fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -18,30 +18,7 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
-import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR;
-import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
-import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -122,7 +99,29 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 
-import com.google.common.collect.Sets;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR;
+import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
+import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 
 
 /**
@@ -1979,19 +1978,17 @@ private void constructOneLBLocationMap(FileStatus fSta,
   }
 
   public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs,
-      boolean deleteData, boolean ignoreProtection, boolean ifExists) throws HiveException {
+      boolean deleteData, boolean ifExists) throws HiveException {
     String[] names = Utilities.getDbTableName(tblName);
-    return dropPartitions(
-        names[0], names[1], partSpecs, deleteData, ignoreProtection, ifExists);
+    return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists);
   }
 
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<DropTableDesc.PartSpec> partSpecs,  boolean deleteData, boolean ignoreProtection,
+      List<DropTableDesc.PartSpec> partSpecs,  boolean deleteData,
       boolean ifExists) throws HiveException {
     return dropPartitions(dbName, tblName, partSpecs,
                           PartitionDropOptions.instance()
                                               .deleteData(deleteData)
-                                              .ignoreProtection(ignoreProtection)
                                               .ifExists(ifExists));
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 08ff2e9..2e77bc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -541,56 +540,6 @@ public class Partition implements Serializable {
   }
 
   /**
-   * @param protectMode
-   */
-  public void setProtectMode(ProtectMode protectMode){
-    Map<String, String> parameters = tPartition.getParameters();
-    String pm = protectMode.toString();
-    if (pm != null) {
-      parameters.put(ProtectMode.PARAMETER_NAME, pm);
-    } else {
-      parameters.remove(ProtectMode.PARAMETER_NAME);
-    }
-    tPartition.setParameters(parameters);
-  }
-
-  /**
-   * @return protect mode
-   */
-  public ProtectMode getProtectMode(){
-    return MetaStoreUtils.getProtectMode(tPartition);
-  }
-
-  /**
-   * @return True protect mode indicates the partition if offline.
-   */
-  public boolean isOffline(){
-    ProtectMode pm = getProtectMode();
-    if (pm == null) {
-      return false;
-    } else {
-      return pm.offline;
-    }
-  }
-
-  /**
-   * @return True if protect mode attribute of the partition indicate
-   * that it is OK to drop the table
-   */
-  public boolean canDrop() {
-    return MetaStoreUtils.canDropPartition(table.getTTable(), tPartition);
-  }
-
-  /**
-   * @return True if protect mode attribute of the partition indicate
-   * that it is OK to write to the table
-   */
-  public boolean canWrite() {
-    ProtectMode mode = getProtectMode();
-    return (!mode.offline && !mode.readOnly);
-  }
-
-  /**
    * @return include the db name
    */
   public String getCompleteName() {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index e53933e..52ed4a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -18,15 +18,6 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +28,6 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -65,6 +55,15 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
 /**
  * A Hive Table: is a fundamental unit of data in Hive that shares a common schema/DDL.
  *
@@ -849,52 +848,6 @@ public class Table implements Serializable {
   }
 
   /**
-   * @param protectMode
-   */
-  public void setProtectMode(ProtectMode protectMode){
-    Map<String, String> parameters = tTable.getParameters();
-    String pm = protectMode.toString();
-    if (pm != null) {
-      parameters.put(ProtectMode.PARAMETER_NAME, pm);
-    } else {
-      parameters.remove(ProtectMode.PARAMETER_NAME);
-    }
-    tTable.setParameters(parameters);
-  }
-
-  /**
-   * @return protect mode
-   */
-  public ProtectMode getProtectMode(){
-    return MetaStoreUtils.getProtectMode(tTable);
-  }
-
-  /**
-   * @return True protect mode indicates the table if offline.
-   */
-  public boolean isOffline(){
-    return getProtectMode().offline;
-  }
-
-  /**
-   * @return True if protect mode attribute of the partition indicate
-   * that it is OK to drop the partition
-   */
-  public boolean canDrop() {
-    ProtectMode mode = getProtectMode();
-    return (!mode.noDrop && !mode.offline && !mode.readOnly && !mode.noDropCascade);
-  }
-
-  /**
-   * @return True if protect mode attribute of the table indicate
-   * that it is OK to write the table
-   */
-  public boolean canWrite() {
-    ProtectMode mode = getProtectMode();
-    return (!mode.offline && !mode.readOnly);
-  }
-
-  /**
    * @return include the db name
    */
   public String getCompleteName() {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index bc09fc3..a78700d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -18,16 +18,6 @@
 
 package org.apache.hadoop.hive.ql.metadata.formatting;
 
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -55,6 +45,16 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
 
 /**
  * This class provides methods to format table and index information.
@@ -357,8 +357,6 @@ public final class MetaDataFormatUtils {
     formatOutput("Owner:", tbl.getOwner(), tableInfo);
     formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
     formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
-    String protectMode = tbl.getProtectMode().toString();
-    formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo);
     formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
     if (!tbl.isView()) {
       formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
@@ -378,8 +376,6 @@ public final class MetaDataFormatUtils {
     formatOutput("CreateTime:", formatDate(part.getTPartition().getCreateTime()), tableInfo);
     formatOutput("LastAccessTime:", formatDate(part.getTPartition().getLastAccessTime()),
         tableInfo);
-    String protectMode = part.getProtectMode().toString();
-    formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo);
     formatOutput("Location:", part.getLocation(), tableInfo);
 
     if (part.getTPartition().getParameters().size() > 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 24ca663..21625bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -18,28 +18,7 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
-import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
-
-import java.io.Serializable;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.common.collect.Lists;
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.logging.Log;
@@ -158,7 +137,27 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.collect.Lists;
+import java.io.Serializable;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
+import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
 
 /**
  * DDLSemanticAnalyzer.
@@ -288,8 +287,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         analyzeExchangePartition(qualified, ast);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
         analyzeAlterTableFileFormat(ast, tableName, partSpec);
-      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) {
-        analyzeAlterTableProtectMode(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) {
         analyzeAlterTableLocation(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) {
@@ -1476,56 +1473,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
   }
 
-  private void analyzeAlterTableProtectMode(ASTNode ast, String tableName,
-      HashMap<String, String> partSpec)
-      throws SemanticException {
-
-    AlterTableDesc alterTblDesc =
-        new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE);
-
-    alterTblDesc.setOldName(tableName);
-    alterTblDesc.setPartSpec(partSpec);
-
-    ASTNode child = (ASTNode) ast.getChild(0);
-
-    switch (child.getToken().getType()) {
-    case HiveParser.TOK_ENABLE:
-      alterTblDesc.setProtectModeEnable(true);
-      break;
-    case HiveParser.TOK_DISABLE:
-      alterTblDesc.setProtectModeEnable(false);
-      break;
-    default:
-      throw new SemanticException(
-          "Set Protect mode Syntax parsing error.");
-    }
-
-    ASTNode grandChild = (ASTNode) child.getChild(0);
-    switch (grandChild.getToken().getType()) {
-    case HiveParser.TOK_OFFLINE:
-      alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.OFFLINE);
-      break;
-    case HiveParser.TOK_NO_DROP:
-      if (grandChild.getChildCount() > 0) {
-        alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP_CASCADE);
-      }
-      else {
-        alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP);
-      }
-      break;
-    case HiveParser.TOK_READONLY:
-      throw new SemanticException(
-          "Potect mode READONLY is not implemented");
-    default:
-      throw new SemanticException(
-          "Only protect mode NO_DROP or OFFLINE supported");
-    }
-
-    addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        alterTblDesc), conf));
-  }
-
   private void analyzeAlterTablePartMergeFiles(ASTNode ast,
       String tableName, HashMap<String, String> partSpec)
       throws SemanticException {
@@ -2690,11 +2637,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     re.noLockNeeded();
     inputs.add(re);
 
-    boolean ignoreProtection = ast.getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null;
-    addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection);
+    addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists);
 
     DropTableDesc dropTblDesc =
-        new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection, mustPurge, replicationSpec);
+        new DropTableDesc(getDotName(qualified), partSpecs, expectView, mustPurge, replicationSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }
 
@@ -3165,9 +3111,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
    * throwIfNonExistent is true, otherwise ignore it.
    */
   private void addTableDropPartsOutputs(Table tab,
-      Collection<List<ExprNodeGenericFuncDesc>> partSpecs, boolean throwIfNonExistent,
-      boolean ignoreProtection) throws SemanticException {
-
+                                        Collection<List<ExprNodeGenericFuncDesc>> partSpecs,
+                                        boolean throwIfNonExistent) throws SemanticException {
     for (List<ExprNodeGenericFuncDesc> specs : partSpecs) {
       for (ExprNodeGenericFuncDesc partSpec : specs) {
         List<Partition> parts = new ArrayList<Partition>();
@@ -3193,11 +3138,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
           }
         }
         for (Partition p : parts) {
-          // TODO: same thing, metastore already checks this but check here if we can.
-          if (!ignoreProtection && !p.canDrop()) {
-            throw new SemanticException(
-              ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName()));
-          }
           outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE));
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
index a4c5d0e..bdf0ed7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
@@ -18,20 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.StringTokenizer;
-import java.util.TreeMap;
-
 import com.google.common.base.Function;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -56,6 +42,18 @@ import org.json.JSONException;
 import org.json.JSONObject;
 
 import javax.annotation.Nullable;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+import java.util.TreeMap;
 
 /**
  *
@@ -129,11 +127,6 @@ public class EximUtil {
   }
 
   static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws SemanticException {
-    if (table.isOffline()) {
-      throw new SemanticException(
-          ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table "
-              + table.getTableName()));
-    }
     if (table.isView()) {
       throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index bdd7cb7..85c0ae6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -110,8 +110,6 @@ KW_AFTER: 'AFTER';
 KW_DESCRIBE: 'DESCRIBE';
 KW_DROP: 'DROP';
 KW_RENAME: 'RENAME';
-KW_IGNORE: 'IGNORE';
-KW_PROTECTION: 'PROTECTION';
 KW_TO: 'TO';
 KW_COMMENT: 'COMMENT';
 KW_BOOLEAN: 'BOOLEAN';
@@ -157,11 +155,8 @@ KW_INPUTFORMAT: 'INPUTFORMAT';
 KW_OUTPUTFORMAT: 'OUTPUTFORMAT';
 KW_INPUTDRIVER: 'INPUTDRIVER';
 KW_OUTPUTDRIVER: 'OUTPUTDRIVER';
-KW_OFFLINE: 'OFFLINE';
 KW_ENABLE: 'ENABLE';
 KW_DISABLE: 'DISABLE';
-KW_READONLY: 'READONLY';
-KW_NO_DROP: 'NO_DROP';
 KW_LOCATION: 'LOCATION';
 KW_TABLESAMPLE: 'TABLESAMPLE';
 KW_BUCKET: 'BUCKET';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 15f1f11..3f95bb8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -151,7 +151,6 @@ TOK_ALTERTABLE_REPLACECOLS;
 TOK_ALTERTABLE_ADDPARTS;
 TOK_ALTERTABLE_DROPPARTS;
 TOK_ALTERTABLE_PARTCOLTYPE;
-TOK_ALTERTABLE_PROTECTMODE;
 TOK_ALTERTABLE_MERGEFILES;
 TOK_ALTERTABLE_TOUCH;
 TOK_ALTERTABLE_ARCHIVE;
@@ -330,7 +329,6 @@ TOK_WINDOWDEF;
 TOK_WINDOWSPEC;
 TOK_WINDOWVALUES;
 TOK_WINDOWRANGE;
-TOK_IGNOREPROTECTION;
 TOK_SUBQUERY_EXPR;
 TOK_SUBQUERY_OP;
 TOK_SUBQUERY_OP_NOTIN;
@@ -809,13 +807,6 @@ orReplace
     -> ^(TOK_ORREPLACE)
     ;
 
-ignoreProtection
-@init { pushMsg("ignore protection clause", state); }
-@after { popMsg(state); }
-        : KW_IGNORE KW_PROTECTION
-        -> ^(TOK_IGNOREPROTECTION)
-        ;
-
 createDatabaseStatement
 @init { pushMsg("create database statement", state); }
 @after { popMsg(state); }
@@ -1022,7 +1013,6 @@ alterTblPartitionStatementSuffix
 @after {popMsg(state);}
   : alterStatementSuffixFileFormat
   | alterStatementSuffixLocation
-  | alterStatementSuffixProtectMode
   | alterStatementSuffixMergeFiles
   | alterStatementSuffixSerdeProperties
   | alterStatementSuffixRenamePart
@@ -1166,9 +1156,9 @@ partitionLocation
 alterStatementSuffixDropPartitions[boolean table]
 @init { pushMsg("drop partition statement", state); }
 @after { popMsg(state); }
-    : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? KW_PURGE? replicationClause?
-    -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? KW_PURGE? replicationClause?)
-    ->            ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? replicationClause?)
+    : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* KW_PURGE? replicationClause?
+    -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? KW_PURGE? replicationClause?)
+    ->            ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? replicationClause?)
     ;
 
 alterStatementSuffixProperties
@@ -1276,13 +1266,6 @@ alterStatementSuffixExchangePartition
     -> ^(TOK_ALTERTABLE_EXCHANGEPARTITION partitionSpec $exchangename)
     ;
 
-alterStatementSuffixProtectMode
-@init { pushMsg("alter partition protect mode statement", state); }
-@after { popMsg(state); }
-    : alterProtectMode
-    -> ^(TOK_ALTERTABLE_PROTECTMODE alterProtectMode)
-    ;
-
 alterStatementSuffixRenamePart
 @init { pushMsg("alter table rename partition statement", state); }
 @after { popMsg(state); }
@@ -1304,21 +1287,6 @@ alterStatementSuffixMergeFiles
     -> ^(TOK_ALTERTABLE_MERGEFILES)
     ;
 
-alterProtectMode
-@init { pushMsg("protect mode specification enable", state); }
-@after { popMsg(state); }
-    : KW_ENABLE alterProtectModeMode  -> ^(TOK_ENABLE alterProtectModeMode)
-    | KW_DISABLE alterProtectModeMode  -> ^(TOK_DISABLE alterProtectModeMode)
-    ;
-
-alterProtectModeMode
-@init { pushMsg("protect mode specification enable", state); }
-@after { popMsg(state); }
-    : KW_OFFLINE  -> ^(TOK_OFFLINE)
-    | KW_NO_DROP KW_CASCADE? -> ^(TOK_NO_DROP KW_CASCADE?)
-    | KW_READONLY  -> ^(TOK_READONLY)
-    ;
-
 alterStatementSuffixBucketNum
 @init { pushMsg("", state); }
 @after { popMsg(state); }

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 944cee4..85fa9c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -206,12 +206,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
     // initialize destination table/partition
     TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
 
-    if (ts.tableHandle.isOffline()){
-      throw new SemanticException(
-          ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table " + ts.tableName));
-    }
-
-    if (ts.tableHandle.isView()) {
+    if (ts.tableHandle.isView()) {
       throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
     }
     if (ts.tableHandle.isNonNative()) {
@@ -255,10 +250,6 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
       try{
         Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
         if (part != null) {
-          if (part.isOffline()) {
-            throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION.
-                getMsg(ts.tableName + ":" + part.getName()));
-          }
           if (isOverWrite){
             outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE));
           } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 8516631..aab4250 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -1601,19 +1601,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, tab_name);
         }
 
-        // We check offline of the table, as if people only select from an
-        // non-existing partition of an offline table, the partition won't
-        // be added to inputs and validate() won't have the information to
-        // check the table's offline status.
-        // TODO: Modify the code to remove the checking here and consolidate
-        // it in validate()
-        //
-        if (tab.isOffline()) {
-          throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION.
-              getMsg("Table " + getUnescapedName(qb.getParseInfo().getSrcForAlias(alias))));
-        }
-
-        if (tab.isView()) {
+        if (tab.isView()) {
           if (qb.getParseInfo().isAnalyzeCommand()) {
             throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg());
           }
@@ -10569,20 +10557,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
       Table tbl = readEntity.getTable();
       Partition p = readEntity.getPartition();
-
-
-      if (tbl.isOffline()) {
-        throw new SemanticException(
-            ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                "Table " + tbl.getTableName()));
-      }
-
-      if (type == ReadEntity.Type.PARTITION && p != null && p.isOffline()) {
-        throw new SemanticException(
-            ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                "Table " + tbl.getTableName() +
-                    " Partition " + p.getName()));
-      }
     }
 
     for (WriteEntity writeEntity : getOutputs()) {
@@ -10636,25 +10610,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         } catch (HiveException e) {
           throw new SemanticException(e);
         }
-
-        if (type == WriteEntity.Type.PARTITION && p != null && p.isOffline()) {
-          throw new SemanticException(
-              ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                  " Table " + tbl.getTableName() +
-                      " Partition " + p.getName()));
-        }
-
       }
       else {
         LOG.debug("Not a partition.");
         tbl = writeEntity.getTable();
       }
-
-      if (tbl.isOffline()) {
-        throw new SemanticException(
-            ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                "Table " + tbl.getTableName()));
-      }
     }
 
     boolean reworkMapredWork = HiveConf.getBoolVar(this.conf,

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 97d02ea..2fdf1e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.util.HashMap;
-
 import org.antlr.runtime.tree.Tree;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
+import java.util.HashMap;
+
 /**
  * SemanticAnalyzerFactory.
  *
@@ -114,10 +114,6 @@ public final class SemanticAnalyzerFactory {
   }
 
   static {
-    tablePartitionCommandType.put(
-        HiveParser.TOK_ALTERTABLE_PROTECTMODE,
-        new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE,
-            HiveOperation.ALTERPARTITION_PROTECTMODE });
     tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT,
         new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT,
             HiveOperation.ALTERPARTITION_FILEFORMAT });


[34/50] [abbrv] hive git commit: HIVE-11130 - Refactoring the code so that HiveTxnManager interface will support lock/unlock table/database object (Aihua Xu, reviewed by Alan Gates and Chao Sun)

Posted by xu...@apache.org.
HIVE-11130 - Refactoring the code so that HiveTxnManager interface will support lock/unlock table/database object (Aihua Xu, reviewed by Alan Gates and Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/46c76d6b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/46c76d6b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/46c76d6b

Branch: refs/heads/beeline-cli
Commit: 46c76d6b8e1fb323d1f8ac75061b6fd3bb747d6d
Parents: 999e0e3
Author: Aihua Xu <ai...@gmail.com>
Authored: Tue Jul 14 11:49:09 2015 -0700
Committer: Chao Sun <ch...@cloudera.com>
Committed: Tue Jul 14 11:49:09 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 147 +------------------
 .../hadoop/hive/ql/lockmgr/HiveLockObject.java  |  35 +++++
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |  47 +++++-
 .../hive/ql/lockmgr/HiveTxnManagerImpl.java     | 140 ++++++++++++++++++
 4 files changed, 227 insertions(+), 142 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/46c76d6b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 049857b..a8c6aca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2444,8 +2444,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         locks = lockMgr.getLocks(false, isExt);
       }
       else {
-        locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(),
-            showLocks.getPartSpec()),
+        locks = lockMgr.getLocks(HiveLockObject.createFrom(db,
+            showLocks.getTableName(), showLocks.getPartSpec()),
             true, isExt);
       }
 
@@ -2705,46 +2705,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int lockTable(LockTableDesc lockTbl) throws HiveException {
     Context ctx = driverContext.getCtx();
     HiveTxnManager txnManager = ctx.getHiveTxnManager();
-    if (!txnManager.supportsExplicitLock()) {
-      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
-          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
-    }
-    HiveLockManager lockMgr = txnManager.getLockManager();
-    if (lockMgr == null) {
-      throw new HiveException("lock Table LockManager not specified");
-    }
-
-    HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode());
-    String tabName = lockTbl.getTableName();
-    Table  tbl = db.getTable(tabName);
-    if (tbl == null) {
-      throw new HiveException("Table " + tabName + " does not exist ");
-    }
-
-    Map<String, String> partSpec = lockTbl.getPartSpec();
-    HiveLockObjectData lockData =
-        new HiveLockObjectData(lockTbl.getQueryId(),
-            String.valueOf(System.currentTimeMillis()),
-            "EXPLICIT",
-            lockTbl.getQueryStr());
-
-    if (partSpec == null) {
-      HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true);
-      if (lck == null) {
-        return 1;
-      }
-      return 0;
-    }
-
-    Partition par = db.getPartition(tbl, partSpec, false);
-    if (par == null) {
-      throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
-    }
-    HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true);
-    if (lck == null) {
-      return 1;
-    }
-    return 0;
+    return txnManager.lockTable(db, lockTbl);
   }
 
   /**
@@ -2759,33 +2720,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int lockDatabase(LockDatabaseDesc lockDb) throws HiveException {
     Context ctx = driverContext.getCtx();
     HiveTxnManager txnManager = ctx.getHiveTxnManager();
-    if (!txnManager.supportsExplicitLock()) {
-      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
-          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
-    }
-    HiveLockManager lockMgr = txnManager.getLockManager();
-    if (lockMgr == null) {
-      throw new HiveException("lock Database LockManager not specified");
-    }
-
-    HiveLockMode mode = HiveLockMode.valueOf(lockDb.getMode());
-    String dbName = lockDb.getDatabaseName();
-
-    Database dbObj = db.getDatabase(dbName);
-    if (dbObj == null) {
-      throw new HiveException("Database " + dbName + " does not exist ");
-    }
-
-    HiveLockObjectData lockData =
-        new HiveLockObjectData(lockDb.getQueryId(),
-            String.valueOf(System.currentTimeMillis()),
-            "EXPLICIT", lockDb.getQueryStr());
-
-    HiveLock lck = lockMgr.lock(new HiveLockObject(dbObj.getName(), lockData), mode, true);
-    if (lck == null) {
-      return 1;
-    }
-    return 0;
+    return txnManager.lockDatabase(db, lockDb);
   }
 
   /**
@@ -2800,55 +2735,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int unlockDatabase(UnlockDatabaseDesc unlockDb) throws HiveException {
     Context ctx = driverContext.getCtx();
     HiveTxnManager txnManager = ctx.getHiveTxnManager();
-    if (!txnManager.supportsExplicitLock()) {
-      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
-          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
-    }
-    HiveLockManager lockMgr = txnManager.getLockManager();
-    if (lockMgr == null) {
-      throw new HiveException("unlock Database LockManager not specified");
-    }
-
-    String dbName = unlockDb.getDatabaseName();
-
-    Database dbObj = db.getDatabase(dbName);
-    if (dbObj == null) {
-      throw new HiveException("Database " + dbName + " does not exist ");
-    }
-    HiveLockObject obj = new HiveLockObject(dbObj.getName(), null);
-
-    List<HiveLock> locks = lockMgr.getLocks(obj, false, false);
-    if ((locks == null) || (locks.isEmpty())) {
-      throw new HiveException("Database " + dbName + " is not locked ");
-    }
-
-    for (HiveLock lock: locks) {
-      lockMgr.unlock(lock);
-
-    }
-    return 0;
-  }
-
-  private HiveLockObject getHiveObject(String tabName,
-      Map<String, String> partSpec) throws HiveException {
-    Table  tbl = db.getTable(tabName);
-    if (tbl == null) {
-      throw new HiveException("Table " + tabName + " does not exist ");
-    }
-
-    HiveLockObject obj = null;
-
-    if  (partSpec == null) {
-      obj = new HiveLockObject(tbl, null);
-    }
-    else {
-      Partition par = db.getPartition(tbl, partSpec, false);
-      if (par == null) {
-        throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
-      }
-      obj = new HiveLockObject(par, null);
-    }
-    return obj;
+    return txnManager.unlockDatabase(db, unlockDb);
   }
 
   /**
@@ -2863,29 +2750,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int unlockTable(UnlockTableDesc unlockTbl) throws HiveException {
     Context ctx = driverContext.getCtx();
     HiveTxnManager txnManager = ctx.getHiveTxnManager();
-    if (!txnManager.supportsExplicitLock()) {
-      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
-          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
-    }
-    HiveLockManager lockMgr = txnManager.getLockManager();
-    if (lockMgr == null) {
-      throw new HiveException("unlock Table LockManager not specified");
-    }
-
-    String tabName = unlockTbl.getTableName();
-    HiveLockObject obj = getHiveObject(tabName, unlockTbl.getPartSpec());
-
-    List<HiveLock> locks = lockMgr.getLocks(obj, false, false);
-    if ((locks == null) || (locks.isEmpty())) {
-      throw new HiveException("Table " + tabName + " is not locked ");
-    }
-    Iterator<HiveLock> locksIter = locks.iterator();
-    while (locksIter.hasNext()) {
-      HiveLock lock = locksIter.next();
-      lockMgr.unlock(lock);
-    }
-
-    return 0;
+    return txnManager.unlockTable(db, unlockTbl);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/46c76d6b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
index 7e93387..fadd074 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
@@ -19,9 +19,12 @@
 package org.apache.hadoop.hive.ql.lockmgr;
 
 import java.util.Arrays;
+import java.util.Map;
 
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 
@@ -196,6 +199,38 @@ public class HiveLockObject {
     this(new String[] {par.getName()}, lockData);
   }
 
+  /**
+   * Creates a locking object for a table (when a partition spec is not
+   * provided) or for a table partition.
+   * @param hiveDB    an object to communicate with the metastore
+   * @param tableName the table to create the locking object on
+   * @param partSpec  the spec of a partition to create the locking object on
+   * @return  the locking object
+   * @throws HiveException
+   */
+  public static HiveLockObject createFrom(Hive hiveDB, String tableName,
+      Map<String, String> partSpec) throws HiveException {
+    Table tbl = hiveDB.getTable(tableName);
+    if (tbl == null) {
+      throw new HiveException("Table " + tableName + " does not exist ");
+    }
+
+    HiveLockObject obj = null;
+
+    if (partSpec == null) {
+      obj = new HiveLockObject(tbl, null);
+    }
+    else {
+      Partition par = hiveDB.getPartition(tbl, partSpec, false);
+      if (par == null) {
+        throw new HiveException("Partition " + partSpec + " for table " +
+            tableName + " does not exist");
+      }
+      obj = new HiveLockObject(par, null);
+    }
+    return obj;
+  }
+
   public String[] getPaths() {
     return pathNames;
   }

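As a usage note, the new factory method might be driven as in the following hedged sketch. The table names and partition spec are hypothetical; Hive.get() is used here the same way the load-path code elsewhere in this series uses it:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class CreateFromSketch {
  public static void main(String[] args) throws HiveException {
    Hive hiveDB = Hive.get();   // current session's metastore handle

    // Null spec: the lock object covers the whole table.
    HiveLockObject tableObj =
        HiveLockObject.createFrom(hiveDB, "default.src", null);

    // Non-null spec: the lock object covers just that partition.
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2015-07-14");   // hypothetical partition column/value
    HiveLockObject partObj =
        HiveLockObject.createFrom(hiveDB, "default.src_part", partSpec);

    System.out.println(Arrays.toString(tableObj.getPaths()));
    System.out.println(Arrays.toString(partObj.getPaths()));
  }
}
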
http://git-wip-us.apache.org/repos/asf/hive/blob/46c76d6b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 6c3dc33..c900548 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -18,9 +18,14 @@
 package org.apache.hadoop.hive.ql.lockmgr;
 
 import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.LockTableDesc;
+import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 
 /**
  * An interface that allows Hive to manage transactions.  All classes
@@ -116,6 +121,46 @@ public interface HiveTxnManager {
   boolean supportsExplicitLock();
 
   /**
+   * This function is called to lock the table when an explicit lock command
+   * is issued on a table.
+   * @param hiveDB    an object to communicate with the metastore
+   * @param lockTbl   table locking info, such as table name, locking mode
+   * @return 0 if the locking succeeds, 1 otherwise.
+   * @throws HiveException
+   */
+  int lockTable(Hive hiveDB, LockTableDesc lockTbl) throws HiveException;
+
+  /**
+   * This function is called to unlock the table when an explicit unlock
+   * command is issued on a table.
+   * @param hiveDB    an object to communicate with the metastore
+   * @param unlockTbl table unlocking info, such as table name
+   * @return 0 if the unlocking succeeds, 1 otherwise.
+   * @throws HiveException
+   */
+  int unlockTable(Hive hiveDB, UnlockTableDesc unlockTbl) throws HiveException;
+
+  /**
+   * This function is called to lock the database when an explicit lock
+   * command is issued on a database.
+   * @param hiveDB    an object to communicate with the metastore
+   * @param lockDb    database locking info, such as database name, locking mode
+   * @return 0 if the locking succeeds, 1 otherwise.
+   * @throws HiveException
+   */
+  int lockDatabase(Hive hiveDB, LockDatabaseDesc lockDb) throws HiveException;
+
+  /**
+   * This function is called to unlock the database when an explicit unlock
+   * command is issued on a database.
+   * @param hiveDB    an object to communicate with the metastore
+   * @param unlockDb  database unlocking info, such as database name
+   * @return 0 if the unlocking succeeds, 1 otherwise.
+   * @throws HiveException
+   */
+  int unlockDatabase(Hive hiveDB, UnlockDatabaseDesc unlockDb) throws HiveException;
+
+  /**
    * Indicate whether this transaction manager returns information about locks in the new format
    * for show locks or the old one.
    * @return true if the new format should be used.

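To make the widened contract concrete, here is a hedged sketch of a caller funneling explicit lock commands through the new methods, mirroring the delegation that DDLTask now performs. The wrapper class is an assumption; the descriptor objects are taken as given from the semantic analyzer:

import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.LockTableDesc;
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;

// Hypothetical wrapper, not Hive code: shows the new call shape only.
public class ExplicitLockDispatcher {
  private final HiveTxnManager txnManager;
  private final Hive hiveDB;

  public ExplicitLockDispatcher(HiveTxnManager txnManager, Hive hiveDB) {
    this.txnManager = txnManager;
    this.hiveDB = hiveDB;
  }

  // Per the interface contract, returns 0 on success and 1 otherwise;
  // managers without explicit-lock support surface as HiveException.
  public int lock(LockTableDesc lockTbl) throws HiveException {
    return txnManager.lockTable(hiveDB, lockTbl);
  }

  public int unlock(UnlockTableDesc unlockTbl) throws HiveException {
    return txnManager.unlockTable(hiveDB, unlockTbl);
  }
}
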
http://git-wip-us.apache.org/repos/asf/hive/blob/46c76d6b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
index eccb8d1..ceeae68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java
@@ -17,7 +17,22 @@
  */
 package org.apache.hadoop.hive.ql.lockmgr;
 
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.LockTableDesc;
+import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 
 /**
  * An implementation HiveTxnManager that includes internal methods that all
@@ -44,4 +59,129 @@ abstract class HiveTxnManagerImpl implements HiveTxnManager {
     destruct();
   }
 
+  @Override
+  public int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
+    HiveLockManager lockMgr = getAndCheckLockManager();
+
+    HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode());
+    String tabName = lockTbl.getTableName();
+    Table tbl = db.getTable(tabName);
+    if (tbl == null) {
+      throw new HiveException("Table " + tabName + " does not exist ");
+    }
+
+    Map<String, String> partSpec = lockTbl.getPartSpec();
+    HiveLockObjectData lockData =
+        new HiveLockObjectData(lockTbl.getQueryId(),
+            String.valueOf(System.currentTimeMillis()),
+            "EXPLICIT",
+            lockTbl.getQueryStr());
+
+    if (partSpec == null) {
+      HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true);
+      if (lck == null) {
+        return 1;
+      }
+      return 0;
+    }
+
+    Partition par = db.getPartition(tbl, partSpec, false);
+    if (par == null) {
+      throw new HiveException("Partition " + partSpec + " for table " +
+          tabName + " does not exist");
+    }
+    HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true);
+    if (lck == null) {
+      return 1;
+    }
+    return 0;
+  }
+
+  @Override
+  public int unlockTable(Hive hiveDB, UnlockTableDesc unlockTbl) throws HiveException {
+    HiveLockManager lockMgr = getAndCheckLockManager();
+
+    String tabName = unlockTbl.getTableName();
+    HiveLockObject obj = HiveLockObject.createFrom(hiveDB, tabName,
+        unlockTbl.getPartSpec());
+
+    List<HiveLock> locks = lockMgr.getLocks(obj, false, false);
+    if ((locks == null) || (locks.isEmpty())) {
+      throw new HiveException("Table " + tabName + " is not locked ");
+    }
+    Iterator<HiveLock> locksIter = locks.iterator();
+    while (locksIter.hasNext()) {
+      HiveLock lock = locksIter.next();
+      lockMgr.unlock(lock);
+    }
+
+    return 0;
+  }
+
+  @Override
+  public int lockDatabase(Hive hiveDB, LockDatabaseDesc lockDb) throws HiveException {
+    HiveLockManager lockMgr = getAndCheckLockManager();
+
+    HiveLockMode mode = HiveLockMode.valueOf(lockDb.getMode());
+    String dbName = lockDb.getDatabaseName();
+
+    Database dbObj = hiveDB.getDatabase(dbName);
+    if (dbObj == null) {
+      throw new HiveException("Database " + dbName + " does not exist ");
+    }
+
+    HiveLockObjectData lockData =
+        new HiveLockObjectData(lockDb.getQueryId(),
+            String.valueOf(System.currentTimeMillis()),
+            "EXPLICIT", lockDb.getQueryStr());
+
+    HiveLock lck = lockMgr.lock(new HiveLockObject(dbObj.getName(), lockData), mode, true);
+    if (lck == null) {
+      return 1;
+    }
+    return 0;
+  }
+
+  @Override
+  public int unlockDatabase(Hive hiveDB, UnlockDatabaseDesc unlockDb) throws HiveException {
+    HiveLockManager lockMgr = getAndCheckLockManager();
+
+    String dbName = unlockDb.getDatabaseName();
+
+    Database dbObj = hiveDB.getDatabase(dbName);
+    if (dbObj == null) {
+      throw new HiveException("Database " + dbName + " does not exist ");
+    }
+    HiveLockObject obj = new HiveLockObject(dbObj.getName(), null);
+
+    List<HiveLock> locks = lockMgr.getLocks(obj, false, false);
+    if ((locks == null) || (locks.isEmpty())) {
+      throw new HiveException("Database " + dbName + " is not locked ");
+    }
+
+    for (HiveLock lock : locks) {
+      lockMgr.unlock(lock);
+    }
+
+    return 0;
+  }
+
+  /**
+   * Gets the lock manager and verifies that explicit locking is supported.
+   * @return  the lock manager
+   * @throws HiveException
+   */
+  protected HiveLockManager getAndCheckLockManager() throws HiveException {
+    HiveLockManager lockMgr = getLockManager();
+    if (lockMgr == null) {
+      throw new HiveException("LockManager cannot be acquired");
+    }
+
+    if (!supportsExplicitLock()) {
+      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
+          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
+    }
+
+    return lockMgr;
+  }
 }


[25/50] [abbrv] hive git commit: HIVE-11232 : CBO: Calcite Operator To Hive Operator (Calcite Return Path): fix the output of select_same_col.q (Pengcheng Xiong via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-11232 : CBO: Calcite Operator To Hive Operator (Calcite Return Path): fix the output of select_same_col.q (Pengcheng Xiong via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/17f759d6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/17f759d6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/17f759d6

Branch: refs/heads/beeline-cli
Commit: 17f759d6332f4f9fb87e4679c01447cd27370420
Parents: 8121b9a
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Mon Jul 20 02:51:00 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Jul 13 09:41:55 2015 -0700

----------------------------------------------------------------------
 ql/src/test/queries/clientpositive/select_same_col.q     | 5 +++--
 ql/src/test/results/clientpositive/select_same_col.q.out | 8 ++++++--
 2 files changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/17f759d6/ql/src/test/queries/clientpositive/select_same_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/select_same_col.q b/ql/src/test/queries/clientpositive/select_same_col.q
index d6902c2..21f0d45 100644
--- a/ql/src/test/queries/clientpositive/select_same_col.q
+++ b/ql/src/test/queries/clientpositive/select_same_col.q
@@ -1,6 +1,7 @@
-
 set hive.cbo.enable=true;
 
+-- SORT_BEFORE_DIFF
+
 drop table srclimit;
 create table srclimit as select * from src limit 10;
 
@@ -16,4 +17,4 @@ select *, key, value from srclimit;
 
 select * from (select *, key, value from srclimit) t;
 
-drop table srclimit;
\ No newline at end of file
+drop table srclimit;

http://git-wip-us.apache.org/repos/asf/hive/blob/17f759d6/ql/src/test/results/clientpositive/select_same_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/select_same_col.q.out b/ql/src/test/results/clientpositive/select_same_col.q.out
index 426f716..f7362f0 100644
--- a/ql/src/test/results/clientpositive/select_same_col.q.out
+++ b/ql/src/test/results/clientpositive/select_same_col.q.out
@@ -1,6 +1,10 @@
-PREHOOK: query: drop table srclimit
+PREHOOK: query: -- SORT_BEFORE_DIFF
+
+drop table srclimit
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table srclimit
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+
+drop table srclimit
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table srclimit as select * from src limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT


[35/50] [abbrv] hive git commit: HIVE-11215: Delete spills only if they exist (Gopal V, reviewed by Matt Mccline)

Posted by xu...@apache.org.
HIVE-11215: Delete spills only if they exist (Gopal V, reviewed by Matt Mccline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e0d4809
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e0d4809
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e0d4809

Branch: refs/heads/beeline-cli
Commit: 6e0d4809baed42e25a46373f0a01a3ef421337ad
Parents: 46c76d6
Author: Gopal V <go...@apache.org>
Authored: Tue Jul 14 22:19:02 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Tue Jul 14 22:19:02 2015 -0700

----------------------------------------------------------------------
 .../exec/vector/mapjoin/VectorMapJoinRowBytesContainer.java | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6e0d4809/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinRowBytesContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinRowBytesContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinRowBytesContainer.java
index 32b60d0..d2e980c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinRowBytesContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinRowBytesContainer.java
@@ -305,9 +305,12 @@ public class VectorMapJoinRowBytesContainer {
       }
       fileOutputStream = null;
     }
-    try {
-      FileUtil.fullyDelete(parentFile);
-    } catch (Throwable ignored) {
+
+    if (parentFile != null) {
+      try {
+        FileUtil.fullyDelete(parentFile);
+      } catch (Throwable ignored) {
+      }
     }
     parentFile = null;
     tmpFile = null;
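
The fix reduces to a guard-then-best-effort-delete pattern. A standalone sketch of the same shape (class and method names hypothetical, not Hive code):

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

public final class SpillCleanup {
  private SpillCleanup() {
  }

  // Attempts recursive deletion only when a spill directory was actually
  // created, and never lets cleanup failures propagate, matching the
  // patched behavior of VectorMapJoinRowBytesContainer above.
  public static void deleteQuietly(File parentFile) {
    if (parentFile != null) {
      try {
        FileUtil.fullyDelete(parentFile);
      } catch (Throwable ignored) {
        // best-effort cleanup; errors are intentionally swallowed
      }
    }
  }
}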