Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/20 01:26:49 UTC

svn commit: r1626364 [1/2] - in /hive/branches/cbo: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/ itests/util/src/main/java/org/apache/hadoop/hive/ql/security/ itests/u...

Author: gunther
Date: Fri Sep 19 23:26:48 2014
New Revision: 1626364

URL: http://svn.apache.org/r1626364
Log:
Merge latest trunk into cbo branch. (Gunther Hagleitner)

Added:
    hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/MetastoreAuthzAPIDisallowAuthorizer.java
      - copied unchanged from r1626362, hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/MetastoreAuthzAPIDisallowAuthorizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/DummyHiveAuthorizationValidator.java
      - copied unchanged from r1626362, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/DummyHiveAuthorizationValidator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdConfOnlyAuthorizerFactory.java
      - copied unchanged from r1626362, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdConfOnlyAuthorizerFactory.java
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_createtab_noauthzapi.q
      - copied unchanged from r1626362, hive/trunk/ql/src/test/queries/clientpositive/authorization_cli_createtab_noauthzapi.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_nonsql.q
      - copied unchanged from r1626362, hive/trunk/ql/src/test/queries/clientpositive/authorization_cli_nonsql.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_stdconfigauth.q
      - copied unchanged from r1626362, hive/trunk/ql/src/test/queries/clientpositive/authorization_cli_stdconfigauth.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_non_id.q
      - copied unchanged from r1626362, hive/trunk/ql/src/test/queries/clientpositive/authorization_non_id.q
    hive/branches/cbo/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out
      - copied unchanged from r1626362, hive/trunk/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out
      - copied unchanged from r1626362, hive/trunk/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out
      - copied unchanged from r1626362, hive/trunk/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/authorization_non_id.q.out
      - copied unchanged from r1626362, hive/trunk/ql/src/test/results/clientpositive/authorization_non_id.q.out
Modified:
    hive/branches/cbo/   (props changed)
    hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java
    hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java
    hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
    hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java
    hive/branches/cbo/ql/pom.xml
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_createtab.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
    hive/branches/cbo/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
    hive/branches/cbo/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
    hive/branches/cbo/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
    hive/branches/cbo/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
    hive/branches/cbo/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java

Propchange: hive/branches/cbo/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1626122-1626362

Modified: hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java (original)
+++ hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java Fri Sep 19 23:26:48 2014
@@ -22,10 +22,14 @@ import org.apache.accumulo.core.client.A
 import org.apache.accumulo.core.client.mapred.AccumuloOutputFormat;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Mutation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters;
 import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.util.Progressable;
 
 import com.google.common.base.Preconditions;
 
@@ -41,6 +45,13 @@ public class HiveAccumuloTableOutputForm
     super.checkOutputSpecs(ignored, job);
   }
 
+  @Override
+  public RecordWriter<Text,Mutation> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
+    configureAccumuloOutputFormat(job);
+
+    return super.getRecordWriter(ignored, job, name, progress);
+  }
+
   protected void configureAccumuloOutputFormat(JobConf job) throws IOException {
     AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(job);
 
@@ -76,16 +87,32 @@ public class HiveAccumuloTableOutputForm
 
   protected void setAccumuloConnectorInfo(JobConf conf, String username, AuthenticationToken token)
       throws AccumuloSecurityException {
-    AccumuloOutputFormat.setConnectorInfo(conf, username, token);
+    try {
+      AccumuloOutputFormat.setConnectorInfo(conf, username, token);
+    } catch (IllegalStateException e) {
+      // AccumuloOutputFormat complains if you re-set an already set value. We just don't care.
+      log.debug("Ignoring exception setting Accumulo Connector instance for user " + username, e);
+    }
   }
 
   @SuppressWarnings("deprecation")
   protected void setAccumuloZooKeeperInstance(JobConf conf, String instanceName, String zookeepers) {
-    AccumuloOutputFormat.setZooKeeperInstance(conf, instanceName, zookeepers);
+    try {
+      AccumuloOutputFormat.setZooKeeperInstance(conf, instanceName, zookeepers);
+    } catch (IllegalStateException ise) {
+      // AccumuloOutputFormat complains if you re-set an already set value. We just don't care.
+      log.debug("Ignoring exception setting ZooKeeper instance of " + instanceName + " at "
+          + zookeepers, ise);
+    }
   }
 
   protected void setAccumuloMockInstance(JobConf conf, String instanceName) {
-    AccumuloOutputFormat.setMockInstance(conf, instanceName);
+    try {
+      AccumuloOutputFormat.setMockInstance(conf, instanceName);
+    } catch (IllegalStateException e) {
+      // AccumuloOutputFormat complains if you re-set an already set value. We just don't care.
+      log.debug("Ignoring exception setting mock instance of " + instanceName, e);
+    }
   }
 
   protected void setDefaultAccumuloTableName(JobConf conf, String tableName) {

Modified: hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java (original)
+++ hive/branches/cbo/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java Fri Sep 19 23:26:48 2014
@@ -99,9 +99,6 @@ public class AccumuloRowSerializer {
     // The ObjectInspector for the row ID
     ObjectInspector fieldObjectInspector = field.getFieldObjectInspector();
 
-    log.info("Serializing rowId with " + value + " in " + field + " using "
-        + rowIdFactory.getClass());
-
     // Serialize the row component using the RowIdFactory. In the normal case, this will just
     // delegate back to the "local" serializeRowId method
     byte[] data = rowIdFactory.serializeRowId(value, field, output);

Modified: hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java (original)
+++ hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java Fri Sep 19 23:26:48 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.securit
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
@@ -38,8 +39,9 @@ public class SQLStdHiveAuthorizationVali
 
   public SQLStdHiveAuthorizationValidatorForTest(HiveMetastoreClientFactory metastoreClientFactory,
       HiveConf conf, HiveAuthenticationProvider authenticator,
-      SQLStdHiveAccessControllerWrapper privController) {
-    super(metastoreClientFactory, conf, authenticator, privController);
+      SQLStdHiveAccessControllerWrapper privController, HiveAuthzSessionContext ctx)
+      throws HiveAuthzPluginException {
+    super(metastoreClientFactory, conf, authenticator, privController, ctx);
   }
 
   @Override

Modified: hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java (original)
+++ hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java Fri Sep 19 23:26:48 2014
@@ -37,7 +37,7 @@ public class SQLStdHiveAuthorizerFactory
     return new HiveAuthorizerImpl(
         privilegeManager,
         new SQLStdHiveAuthorizationValidatorForTest(metastoreClientFactory, conf, authenticator,
-            privilegeManager)
+            privilegeManager, ctx)
         );
   }
 }

Modified: hive/branches/cbo/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/pom.xml?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/pom.xml (original)
+++ hive/branches/cbo/ql/pom.xml Fri Sep 19 23:26:48 2014
@@ -612,6 +612,7 @@
                   <include>com.twitter:parquet-hadoop-bundle</include>
                   <include>org.apache.thrift:libthrift</include>
                   <include>commons-lang:commons-lang</include>
+                  <include>org.apache.commons:commons-lang3</include>
                   <include>org.jodd:jodd-core</include>
                   <include>org.json:json</include>
                   <include>org.apache.avro:avro</include>

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Fri Sep 19 23:26:48 2014
@@ -27,6 +27,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -34,10 +35,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
-import org.apache.hadoop.hive.ql.exec.FooterBuffer;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hive.ql.plan.Fe
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
@@ -61,11 +59,8 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
@@ -83,6 +78,9 @@ public class FetchOperator implements Se
   static Log LOG = LogFactory.getLog(FetchOperator.class.getName());
   static LogHelper console = new LogHelper(LOG);
 
+  public static final String FETCH_OPERATOR_DIRECTORY_LIST =
+      "hive.complete.dir.list";
+
   private boolean isNativeTable;
   private FetchWork work;
   protected Operator<?> operator;    // operator tree for processing row further (option)
@@ -353,6 +351,7 @@ public class FetchOperator implements Se
         }
         return;
       } else {
+        setFetchOperatorContext(job, work.getPartDir());
         iterPath = work.getPartDir().iterator();
         iterPartDesc = work.getPartDesc().iterator();
       }
@@ -381,6 +380,30 @@ public class FetchOperator implements Se
   }
 
   /**
+   * Set context for this fetch operator in to the jobconf.
+   * This helps InputFormats make decisions based on the scope of the complete
+   * operation.
+   * @param conf the configuration to modify
+   * @param partDirs the list of partition directories
+   */
+  static void setFetchOperatorContext(JobConf conf,
+                                      ArrayList<Path> partDirs) {
+    if (partDirs != null) {
+      StringBuilder buff = new StringBuilder();
+      boolean first = true;
+      for(Path p: partDirs) {
+        if (first) {
+          first = false;
+        } else {
+          buff.append('\t');
+        }
+        buff.append(StringEscapeUtils.escapeJava(p.toString()));
+      }
+      conf.set(FETCH_OPERATOR_DIRECTORY_LIST, buff.toString());
+    }
+  }
+
+  /**
    * A cache of Object Inspector Settable Properties.
    */
   private static Map<ObjectInspector, Boolean> oiSettableProperties = new HashMap<ObjectInspector, Boolean>();

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java Fri Sep 19 23:26:48 2014
@@ -697,6 +697,7 @@ public class SMBMapJoinOperator extends 
     // But if hive supports assigning bucket number for each partition, this can be vary
     public void setupContext(List<Path> paths) throws HiveException {
       int segmentLen = paths.size();
+      FetchOperator.setFetchOperatorContext(jobConf, fetchWork.getPartDir());
       FetchOperator[] segments = segmentsForSize(segmentLen);
       for (int i = 0 ; i < segmentLen; i++) {
         Path path = paths.get(i);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Fri Sep 19 23:26:48 2014
@@ -27,6 +27,7 @@ import org.antlr.runtime.CommonToken;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.WordUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -2275,13 +2276,15 @@ public final class Utilities {
    *          configuration which receives configured properties
    */
   public static void copyTableJobPropertiesToConf(TableDesc tbl, JobConf job) {
-    String bucketString = tbl.getProperties()
-        .getProperty(hive_metastoreConstants.BUCKET_COUNT);
-    // copy the bucket count
-    if (bucketString != null) {
-      job.set(hive_metastoreConstants.BUCKET_COUNT, bucketString);
+    Properties tblProperties = tbl.getProperties();
+    for(String name: tblProperties.stringPropertyNames()) {
+      if (job.get(name) == null) {
+        String val = (String) tblProperties.get(name);
+        if (val != null) {
+          job.set(name, StringEscapeUtils.escapeJava(val));
+        }
+      }
     }
-
     Map<String, String> jobProperties = tbl.getJobProperties();
     if (jobProperties == null) {
       return;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java Fri Sep 19 23:26:48 2014
@@ -850,7 +850,7 @@ public class DagUtils {
     throws IOException {
     FileSystem destFS = dest.getFileSystem(conf);
 
-    if (src != null) {
+    if (src != null && checkPreExisting(src, dest, conf) == false) {
       // copy the src to the destination and create local resource.
       // do not overwrite.
       LOG.info("Localizing resource because it does not exist: " + src + " to dest: " + dest);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java Fri Sep 19 23:26:48 2014
@@ -660,7 +660,7 @@ public final class VectorExpressionWrite
       @Override
       public Object writeValue(byte[] value, int start, int length) throws HiveException {
         this.text.set(value, start, length);
-        ((SettableStringObjectInspector) this.objectInspector).set(this.obj, this.text.toString());
+        ((SettableStringObjectInspector) this.objectInspector).set(this.obj, this.text);
         return this.obj;
       }
 
@@ -671,7 +671,7 @@ public final class VectorExpressionWrite
           field = initValue(null);
         }
         this.text.set(value, start, length);
-        ((SettableStringObjectInspector) this.objectInspector).set(field, this.text.toString());
+        ((SettableStringObjectInspector) this.objectInspector).set(field, this.text);
         return field;
       }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java Fri Sep 19 23:26:48 2014
@@ -318,7 +318,7 @@ public class AcidUtils {
       String filename = file.getPath().getName();
       if (filename.startsWith(BASE_PREFIX) ||
           filename.startsWith(DELTA_PREFIX)) {
-        if (file.isDirectory()) {
+        if (file.isDir()) {
           return true;
         }
       }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java Fri Sep 19 23:26:48 2014
@@ -122,24 +122,7 @@ public class BucketizedHiveInputFormat<K
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     init(job);
 
-    Path[] dirs = FileInputFormat.getInputPaths(job);
-    if (dirs.length == 0) {
-      // on tez we're avoiding to duplicate the file info in FileInputFormat.
-      if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
-        try {
-          List<Path> paths = Utilities.getInputPathsTez(job, mrwork);
-          dirs = paths.toArray(new Path[paths.size()]);
-          if (dirs.length == 0) {
-            // if we still don't have any files it's time to fail.
-            throw new IOException("No input paths specified in job");
-          }
-        } catch (Exception e) {
-          throw new IOException("Could not create input paths", e);
-        }
-      } else {
-        throw new IOException("No input paths specified in job");
-      }
-    }
+    Path[] dirs = getInputPaths(job);
 
     JobConf newjob = new JobConf(job);
     ArrayList<InputSplit> result = new ArrayList<InputSplit>();

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Fri Sep 19 23:26:48 2014
@@ -33,6 +33,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -264,8 +265,8 @@ public class CombineHiveInputFormat<K ex
   /**
    * Create Hive splits based on CombineFileSplit.
    */
-  @Override
-  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
+  private InputSplit[] getCombineSplits(JobConf job,
+                                        int numSplits) throws IOException {
     PerfLogger perfLogger = PerfLogger.getPerfLogger();
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
     init(job);
@@ -274,17 +275,6 @@ public class CombineHiveInputFormat<K ex
       mrwork.getAliasToWork();
     CombineFileInputFormatShim combine = ShimLoader.getHadoopShims()
         .getCombineFileInputFormat();
-    
-    // on tez we're avoiding duplicating path info since the info will go over
-    // rpc
-    if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
-      try {
-        List<Path> dirs = Utilities.getInputPathsTez(job, mrwork);
-        Utilities.setInputPaths(job, dirs);
-      } catch (Exception e) {
-        throw new IOException("Could not create input paths", e);
-      }
-    }
 
     InputSplit[] splits = null;
     if (combine == null) {
@@ -327,13 +317,6 @@ public class CombineHiveInputFormat<K ex
         // ignore
       }
       FileSystem inpFs = path.getFileSystem(job);
-      if (inputFormatClass.isAssignableFrom(OrcInputFormat.class)) {
-        if (inpFs.exists(new Path(path, OrcRecordUpdater.ACID_FORMAT))) {
-          throw new IOException("CombineHiveInputFormat is incompatible " +
-            " with ACID tables. Please set hive.input.format=" +
-              "org.apache.hadoop.hive.ql.io.HiveInputFormat");
-        }
-      }
 
       // Since there is no easy way of knowing whether MAPREDUCE-1597 is present in the tree or not,
       // we use a configuration variable for the same
@@ -461,6 +444,84 @@ public class CombineHiveInputFormat<K ex
     return result.toArray(new CombineHiveInputSplit[result.size()]);
   }
 
+  /**
+   * Create Hive splits based on CombineFileSplit.
+   */
+  @Override
+  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
+    init(job);
+    Map<String, ArrayList<String>> pathToAliases = mrwork.getPathToAliases();
+    Map<String, Operator<? extends OperatorDesc>> aliasToWork =
+        mrwork.getAliasToWork();
+
+    ArrayList<InputSplit> result = new ArrayList<InputSplit>();
+
+    Path[] paths = getInputPaths(job);
+
+    List<Path> nonCombinablePaths = new ArrayList<Path>(paths.length / 2);
+    List<Path> combinablePaths = new ArrayList<Path>(paths.length / 2);
+
+    for (Path path : paths) {
+
+      PartitionDesc part =
+          HiveFileFormatUtils.getPartitionDescFromPathRecursively(
+              pathToPartitionInfo, path,
+              IOPrepareCache.get().allocatePartitionDescMap());
+
+      // Use HiveInputFormat if any of the paths is not splittable
+      Class inputFormatClass = part.getInputFileFormatClass();
+      String inputFormatClassName = inputFormatClass.getName();
+      InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
+      if (inputFormat instanceof AvoidSplitCombination &&
+          ((AvoidSplitCombination) inputFormat).shouldSkipCombine(path, job)) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("The split [" + path +
+              "] is being parked for HiveInputFormat.getSplits");
+        }
+        nonCombinablePaths.add(path);
+      } else {
+        combinablePaths.add(path);
+      }
+    }
+
+    // Store the previous value for the path specification
+    String oldPaths = job.get(HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("The received input paths are: [" + oldPaths +
+          "] against the property "
+          + HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname);
+    }
+
+    // Process the normal splits
+    if (nonCombinablePaths.size() > 0) {
+      FileInputFormat.setInputPaths(job, nonCombinablePaths.toArray
+          (new Path[nonCombinablePaths.size()]));
+      InputSplit[] splits = super.getSplits(job, numSplits);
+      for (InputSplit split : splits) {
+        result.add(split);
+      }
+    }
+
+    // Process the combine splits
+    if (combinablePaths.size() > 0) {
+      FileInputFormat.setInputPaths(job, combinablePaths.toArray
+          (new Path[combinablePaths.size()]));
+      InputSplit[] splits = getCombineSplits(job, numSplits);
+      for (InputSplit split : splits) {
+        result.add(split);
+      }
+    }
+
+    // Restore the old path information back
+    // This is just to prevent incompatibilities with previous versions Hive
+    // if some application depends on the original value being set.
+    if (oldPaths != null) {
+      job.set(HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname, oldPaths);
+    }
+    LOG.info("Number of all splits " + result.size());
+    return result.toArray(new InputSplit[result.size()]);
+  }
+
   private void processPaths(JobConf job, CombineFileInputFormatShim combine,
       List<InputSplitShim> iss, Path... path) throws IOException {
     JobConf currJob = new JobConf(job);
@@ -635,4 +696,12 @@ public class CombineHiveInputFormat<K ex
       return s.toString();
     }
   }
+
+    /**
+     * This is a marker interface that is used to identify the formats where
+     * combine split generation is not applicable
+     */
+  public interface AvoidSplitCombination {
+    boolean shouldSkipCombine(Path path, Configuration conf) throws IOException;
+  }
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java Fri Sep 19 23:26:48 2014
@@ -295,11 +295,7 @@ public class HiveInputFormat<K extends W
     }
   }
 
-  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
-    PerfLogger perfLogger = PerfLogger.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
-    init(job);
-
+  Path[] getInputPaths(JobConf job) throws IOException {
     Path[] dirs = FileInputFormat.getInputPaths(job);
     if (dirs.length == 0) {
       // on tez we're avoiding to duplicate the file info in FileInputFormat.
@@ -314,6 +310,14 @@ public class HiveInputFormat<K extends W
         throw new IOException("No input paths specified in job");
       }
     }
+    return dirs;
+  }
+
+  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
+    PerfLogger perfLogger = PerfLogger.getPerfLogger();
+    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
+    init(job);
+    Path[] dirs = getInputPaths(job);
     JobConf newjob = new JobConf(job);
     List<InputSplit> result = new ArrayList<InputSplit>();
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java Fri Sep 19 23:26:48 2014
@@ -24,6 +24,8 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
+import java.util.NavigableMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -46,6 +48,7 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
@@ -99,7 +102,8 @@ import com.google.common.util.concurrent
  */
 public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
   InputFormatChecker, VectorizedInputFormatInterface,
-    AcidInputFormat<NullWritable, OrcStruct> {
+    AcidInputFormat<NullWritable, OrcStruct>, 
+    CombineHiveInputFormat.AvoidSplitCombination {
 
   private static final Log LOG = LogFactory.getLog(OrcInputFormat.class);
   static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
@@ -125,6 +129,12 @@ public class OrcInputFormat  implements 
    */
   private static final double MIN_INCLUDED_LOCATION = 0.80;
 
+  @Override
+  public boolean shouldSkipCombine(Path path,
+                                   Configuration conf) throws IOException {
+    return AcidUtils.isAcid(path, conf);
+  }
+
   private static class OrcRecordReader
       implements org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct>,
       StatsProvidingRecordReader {
@@ -610,7 +620,7 @@ public class OrcInputFormat  implements 
     private final FileSystem fs;
     private final FileStatus file;
     private final long blockSize;
-    private final BlockLocation[] locations;
+    private final TreeMap<Long, BlockLocation> locations;
     private final FileInfo fileInfo;
     private List<StripeInformation> stripes;
     private ReaderImpl.FileMetaInfo fileMetaInfo;
@@ -630,7 +640,7 @@ public class OrcInputFormat  implements 
       this.file = file;
       this.blockSize = file.getBlockSize();
       this.fileInfo = fileInfo;
-      locations = SHIMS.getLocations(fs, file);
+      locations = SHIMS.getLocationsWithOffset(fs, file);
       this.isOriginal = isOriginal;
       this.deltas = deltas;
       this.hasBase = hasBase;
@@ -641,8 +651,8 @@ public class OrcInputFormat  implements 
     }
 
     void schedule() throws IOException {
-      if(locations.length == 1 && file.getLen() < context.maxSize) {
-        String[] hosts = locations[0].getHosts();
+      if(locations.size() == 1 && file.getLen() < context.maxSize) {
+        String[] hosts = locations.firstEntry().getValue().getHosts();
         synchronized (context.splits) {
           context.splits.add(new OrcSplit(file.getPath(), 0, file.getLen(),
                 hosts, fileMetaInfo, isOriginal, hasBase, deltas));
@@ -690,15 +700,22 @@ public class OrcInputFormat  implements 
     void createSplit(long offset, long length,
                      ReaderImpl.FileMetaInfo fileMetaInfo) throws IOException {
       String[] hosts;
-      if ((offset % blockSize) + length <= blockSize) {
+      Map.Entry<Long, BlockLocation> startEntry = locations.floorEntry(offset);
+      BlockLocation start = startEntry.getValue();
+      if (offset + length <= start.getOffset() + start.getLength()) {
         // handle the single block case
-        hosts = locations[(int) (offset / blockSize)].getHosts();
+        hosts = start.getHosts();
       } else {
+        Map.Entry<Long, BlockLocation> endEntry = locations.floorEntry(offset + length);
+        BlockLocation end = endEntry.getValue();
+        //get the submap
+        NavigableMap<Long, BlockLocation> navigableMap = locations.subMap(startEntry.getKey(),
+                  true, endEntry.getKey(), true);
         // Calculate the number of bytes in the split that are local to each
         // host.
         Map<String, LongWritable> sizes = new HashMap<String, LongWritable>();
         long maxSize = 0;
-        for(BlockLocation block: locations) {
+        for (BlockLocation block : navigableMap.values()) {
           long overlap = getOverlap(offset, length, block.getOffset(),
               block.getLength());
           if (overlap > 0) {
@@ -711,6 +728,9 @@ public class OrcInputFormat  implements 
               val.set(val.get() + overlap);
               maxSize = Math.max(maxSize, val.get());
             }
+          } else {
+            throw new IOException("File " + file.getPath().toString() +
+                    " should have had overlap on block starting at " + block.getOffset());
           }
         }
         // filter the list of locations to those that have at least 80% of the
@@ -718,7 +738,7 @@ public class OrcInputFormat  implements 
         long threshold = (long) (maxSize * MIN_INCLUDED_LOCATION);
         List<String> hostList = new ArrayList<String>();
         // build the locations in a predictable order to simplify testing
-        for(BlockLocation block: locations) {
+        for(BlockLocation block: navigableMap.values()) {
           for(String host: block.getHosts()) {
             if (sizes.containsKey(host)) {
               if (sizes.get(host).get() >= threshold) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java Fri Sep 19 23:26:48 2014
@@ -193,11 +193,12 @@ public class StatsOptimizer implements T
         }
         SelectOperator selOp = (SelectOperator)tsOp.getChildren().get(0);
         for(ExprNodeDesc desc : selOp.getConf().getColList()) {
-          if (!(desc instanceof ExprNodeColumnDesc)) {
+          if (!((desc instanceof ExprNodeColumnDesc) || (desc instanceof ExprNodeConstantDesc))) {
             // Probably an expression, cant handle that
             return null;
           }
         }
+        Map<String, ExprNodeDesc> exprMap = selOp.getColumnExprMap();
         // Since we have done an exact match on TS-SEL-GBY-RS-GBY-SEL-FS
         // we need not to do any instanceof checks for following.
         GroupByOperator gbyOp = (GroupByOperator)selOp.getChildren().get(0);
@@ -215,6 +216,12 @@ public class StatsOptimizer implements T
           return null;
 
         }
+        for(ExprNodeDesc desc : selOp.getConf().getColList()) {
+          if (!(desc instanceof ExprNodeColumnDesc)) {
+            // Probably an expression, cant handle that
+            return null;
+          }
+        }
         FileSinkOperator fsOp = (FileSinkOperator)(selOp.getChildren().get(0));
         if (fsOp.getChildOperators() != null && fsOp.getChildOperators().size() > 0) {
           // looks like a subq plan.
@@ -236,22 +243,28 @@ public class StatsOptimizer implements T
           GenericUDAFResolver udaf =
               FunctionRegistry.getGenericUDAFResolver(aggr.getGenericUDAFName());
           if (udaf instanceof GenericUDAFSum) {
-            if(!(aggr.getParameters().get(0) instanceof ExprNodeConstantDesc)){
+            ExprNodeDesc desc = aggr.getParameters().get(0);
+            String constant;
+            if (desc instanceof ExprNodeConstantDesc) {
+              constant = ((ExprNodeConstantDesc) desc).getValue().toString();
+            } else if (desc instanceof ExprNodeColumnDesc && exprMap.get(((ExprNodeColumnDesc)desc).getColumn()) instanceof ExprNodeConstantDesc) {
+              constant = ((ExprNodeConstantDesc)exprMap.get(((ExprNodeColumnDesc)desc).getColumn())).getValue().toString();
+            } else {
               return null;
             }
             Long rowCnt = getRowCnt(pctx, tsOp, tbl);
             if(rowCnt == null) {
               return null;
             }
-            oneRow.add(HiveDecimal.create(((ExprNodeConstantDesc) aggr.getParameters().get(0))
-                .getValue().toString()).multiply(HiveDecimal.create(rowCnt)));
+            oneRow.add(HiveDecimal.create(constant).multiply(HiveDecimal.create(rowCnt)));
             ois.add(PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(
                 PrimitiveCategory.DECIMAL));
           }
           else if (udaf instanceof GenericUDAFCount) {
             Long rowCnt = 0L;
-            if ((aggr.getParameters().isEmpty() || aggr.getParameters().get(0) instanceof
-                ExprNodeConstantDesc)) {
+            if (aggr.getParameters().isEmpty() || aggr.getParameters().get(0) instanceof
+                ExprNodeConstantDesc || ((aggr.getParameters().get(0) instanceof ExprNodeColumnDesc) &&
+                    exprMap.get(((ExprNodeColumnDesc)aggr.getParameters().get(0)).getColumn()) instanceof ExprNodeConstantDesc)) {
               // Its either count (*) or count(1) case
               rowCnt = getRowCnt(pctx, tsOp, tbl);
               if(rowCnt == null) {
@@ -259,12 +272,7 @@ public class StatsOptimizer implements T
               }
             } else {
               // Its count(col) case
-              if (!(aggr.getParameters().get(0) instanceof ExprNodeColumnDesc)) {
-                // this is weird, we got expr or something in there, bail out
-                Log.debug("Unexpected expression : " + aggr.getParameters().get(0));
-                return null;
-              }
-              ExprNodeColumnDesc desc = (ExprNodeColumnDesc)aggr.getParameters().get(0);
+              ExprNodeColumnDesc desc = (ExprNodeColumnDesc)exprMap.get(((ExprNodeColumnDesc)aggr.getParameters().get(0)).getColumn());
               String colName = desc.getColumn();
               StatType type = getType(desc.getTypeString());
               if(!tbl.isPartitioned()) {
@@ -330,7 +338,7 @@ public class StatsOptimizer implements T
             ois.add(PrimitiveObjectInspectorFactory.
                 getPrimitiveJavaObjectInspector(PrimitiveCategory.LONG));
           } else if (udaf instanceof GenericUDAFMax) {
-            ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc)aggr.getParameters().get(0);
+            ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc)exprMap.get(((ExprNodeColumnDesc)aggr.getParameters().get(0)).getColumn());
             String colName = colDesc.getColumn();
             StatType type = getType(colDesc.getTypeString());
             if(!tbl.isPartitioned()) {
@@ -419,7 +427,7 @@ public class StatsOptimizer implements T
               }
             }
           }  else if (udaf instanceof GenericUDAFMin) {
-            ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc)aggr.getParameters().get(0);
+            ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc)exprMap.get(((ExprNodeColumnDesc)aggr.getParameters().get(0)).getColumn());
             String colName = colDesc.getColumn();
             StatType type = getType(colDesc.getTypeString());
             if (!tbl.isPartitioned()) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Fri Sep 19 23:26:48 2014
@@ -1530,8 +1530,8 @@ principalSpecification
 principalName
 @init {pushMsg("user|group|role name", state);}
 @after {popMsg(state);}
-    : KW_USER identifier -> ^(TOK_USER identifier)
-    | KW_GROUP identifier -> ^(TOK_GROUP identifier)
+    : KW_USER principalIdentifier -> ^(TOK_USER principalIdentifier)
+    | KW_GROUP principalIdentifier -> ^(TOK_GROUP principalIdentifier)
     | KW_ROLE identifier -> ^(TOK_ROLE identifier)
     ;
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g Fri Sep 19 23:26:48 2014
@@ -536,6 +536,13 @@ functionIdentifier
     identifier
     ;
 
+principalIdentifier
+@init { gParent.pushMsg("identifier for principal spec", state); }
+@after { gParent.popMsg(state); }
+    : identifier
+    | QuotedIdentifier
+    ;
+
 nonReserved
     :
    KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java Fri Sep 19 23:26:48 2014
@@ -21,6 +21,9 @@ package org.apache.hadoop.hive.ql.proces
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
@@ -31,6 +34,7 @@ import org.apache.hadoop.hive.ql.session
 import com.google.common.base.Joiner;
 
 class CommandUtil {
+  public static final Log LOG = LogFactory.getLog(CommandUtil.class);
 
   /**
    * Authorize command of given type and arguments
@@ -47,14 +51,19 @@ class CommandUtil {
       // ss can be null in unit tests
       return null;
     }
-    if (ss.isAuthorizationModeV2()) {
+
+    if (ss.isAuthorizationModeV2() &&
+        HiveConf.getBoolVar(ss.getConf(), HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+      String errMsg = "Error authorizing command " + command;
       try {
         authorizeCommandThrowEx(ss, type, command);
         // authorized to perform action
         return null;
       } catch (HiveAuthzPluginException e) {
+        LOG.error(errMsg, e);
         return CommandProcessorResponse.create(e);
       } catch (HiveAccessControlException e) {
+        LOG.error(errMsg, e);
         return CommandProcessorResponse.create(e);
       }
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java Fri Sep 19 23:26:48 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -53,6 +54,8 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilege;
@@ -455,4 +458,23 @@ public class SQLAuthorizationUtils {
     return hivePrincipals;
   }
 
+  /**
+   * Change the session context based on configuration to aid in testing of sql
+   * std auth
+   *
+   * @param ctx
+   * @param conf
+   * @return
+   */
+  static HiveAuthzSessionContext applyTestSettings(HiveAuthzSessionContext ctx, HiveConf conf) {
+    if (conf.getBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE)
+        && ctx.getClientType() == CLIENT_TYPE.HIVECLI) {
+      // create new session ctx object with HS2 as client type
+      HiveAuthzSessionContext.Builder ctxBuilder = new HiveAuthzSessionContext.Builder(ctx);
+      ctxBuilder.setClientType(CLIENT_TYPE.HIVESERVER2);
+      return ctxBuilder.build();
+    }
+    return ctx;
+  }
+
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java Fri Sep 19 23:26:48 2014
@@ -90,42 +90,11 @@ public class SQLStdHiveAccessController 
       HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
     this.metastoreClientFactory = metastoreClientFactory;
     this.authenticator = authenticator;
-    this.sessionCtx = applyTestSettings(ctx, conf);
-
-    assertHiveCliAuthDisabled(conf);
-    initUserRoles();
+    this.sessionCtx = SQLAuthorizationUtils.applyTestSettings(ctx, conf);
     LOG.info("Created SQLStdHiveAccessController for session context : " + sessionCtx);
   }
 
   /**
-   * Change the session context based on configuration to aid in testing of sql std auth
-   * @param ctx
-   * @param conf
-   * @return
-   */
-  private HiveAuthzSessionContext applyTestSettings(HiveAuthzSessionContext ctx, HiveConf conf) {
-    if(conf.getBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE) &&
-        ctx.getClientType() == CLIENT_TYPE.HIVECLI
-        ){
-      // create new session ctx object with HS2 as client type
-      HiveAuthzSessionContext.Builder ctxBuilder = new HiveAuthzSessionContext.Builder(ctx);
-      ctxBuilder.setClientType(CLIENT_TYPE.HIVESERVER2);
-      return ctxBuilder.build();
-    }
-    return ctx;
-  }
-
-  private void assertHiveCliAuthDisabled(HiveConf conf) throws HiveAuthzPluginException {
-    if (sessionCtx.getClientType() == CLIENT_TYPE.HIVECLI
-        && conf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
-      throw new HiveAuthzPluginException(
-          "SQL standards based authorization should not be enabled from hive cli"
-              + "Instead the use of storage based authorization in hive metastore is reccomended. Set "
-              + ConfVars.HIVE_AUTHORIZATION_ENABLED.varname + "=false to disable authz within cli");
-    }
-  }
-
-  /**
    * (Re-)initialize currentRoleNames if necessary.
    * @throws HiveAuthzPluginException
    */

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java Fri Sep 19 23:26:48 2014
@@ -25,12 +25,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizationValidator;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal;
@@ -44,16 +47,30 @@ public class SQLStdHiveAuthorizationVali
   private final HiveConf conf;
   private final HiveAuthenticationProvider authenticator;
   private final SQLStdHiveAccessControllerWrapper privController;
+  private final HiveAuthzSessionContext ctx;
   public static final Log LOG = LogFactory.getLog(SQLStdHiveAuthorizationValidator.class);
 
   public SQLStdHiveAuthorizationValidator(HiveMetastoreClientFactory metastoreClientFactory,
       HiveConf conf, HiveAuthenticationProvider authenticator,
-      SQLStdHiveAccessControllerWrapper privilegeManager) {
+      SQLStdHiveAccessControllerWrapper privilegeManager, HiveAuthzSessionContext ctx)
+      throws HiveAuthzPluginException {
 
     this.metastoreClientFactory = metastoreClientFactory;
     this.conf = conf;
     this.authenticator = authenticator;
     this.privController = privilegeManager;
+    this.ctx = SQLAuthorizationUtils.applyTestSettings(ctx, conf);
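+    // reject configurations that enable SQL standard authorization from the Hive CLI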
+    assertHiveCliAuthDisabled(conf);
+  }
+
+  private void assertHiveCliAuthDisabled(HiveConf conf) throws HiveAuthzPluginException {
+    if (ctx.getClientType() == CLIENT_TYPE.HIVECLI
+        && conf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+      throw new HiveAuthzPluginException(
+          "SQL standards based authorization should not be enabled from hive cli"
+              + "Instead the use of storage based authorization in hive metastore is reccomended. Set "
+              + ConfVars.HIVE_AUTHORIZATION_ENABLED.varname + "=false to disable authz within cli");
+    }
   }
 
   @Override

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java Fri Sep 19 23:26:48 2014
@@ -37,7 +37,7 @@ public class SQLStdHiveAuthorizerFactory
     return new HiveAuthorizerImpl(
         privilegeManager,
         new SQLStdHiveAuthorizationValidator(metastoreClientFactory, conf, authenticator,
-            privilegeManager)
+            privilegeManager, ctx)
         );
   }
 }

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Fri Sep 19 23:26:48 2014
@@ -18,17 +18,24 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.io.IOContext;
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
 import org.apache.hadoop.hive.ql.plan.CollectDesc;
@@ -42,6 +49,10 @@ import org.apache.hadoop.hive.ql.plan.Pl
 import org.apache.hadoop.hive.ql.plan.ScriptDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
@@ -49,8 +60,14 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.junit.Test;
 
 /**
  * TestOperators.
@@ -274,7 +291,7 @@ public class TestOperators extends TestC
           cd, sop);
 
       op.initialize(new JobConf(TestOperators.class),
-          new ObjectInspector[] {r[0].oi});
+          new ObjectInspector[]{r[0].oi});
 
       // evaluate on row
       for (int i = 0; i < 5; i++) {
@@ -379,4 +396,82 @@ public class TestOperators extends TestC
       throw (e);
     }
   }
+
+  @Test
+  public void testFetchOperatorContextQuoting() throws Exception {
+    JobConf conf = new JobConf();
+    ArrayList<Path> list = new ArrayList<Path>();
+    list.add(new Path("hdfs://nn.example.com/fi\tl\\e\t1"));
+    list.add(new Path("hdfs://nn.example.com/file\t2"));
+    list.add(new Path("file:/file3"));
+    FetchOperator.setFetchOperatorContext(conf, list);
+    String[] parts =
+        conf.get(FetchOperator.FETCH_OPERATOR_DIRECTORY_LIST).split("\t");
+    assertEquals(3, parts.length);
+    assertEquals("hdfs://nn.example.com/fi\\tl\\\\e\\t1", parts[0]);
+    assertEquals("hdfs://nn.example.com/file\\t2", parts[1]);
+    assertEquals("file:/file3", parts[2]);
+  }
+
+  /**
+   * A custom input format that checks to make sure that the fetch operator
+   * sets the required attributes.
+   */
+  public static class CustomInFmt extends TextInputFormat {
+
+    @Override
+    public InputSplit[] getSplits(JobConf job, int splits) throws IOException {
+
+      // ensure that the table properties were copied
+      assertEquals("val1", job.get("myprop1"));
+      assertEquals("val2", job.get("myprop2"));
+
+      // ensure that both of the partitions are in the complete list.
+      String[] dirs = job.get("hive.complete.dir.list").split("\t");
+      assertEquals(2, dirs.length);
+      assertEquals(true, dirs[0].endsWith("/state=CA"));
+      assertEquals(true, dirs[1].endsWith("/state=OR"));
+      return super.getSplits(job, splits);
+    }
+  }
+
+  @Test
+  public void testFetchOperatorContext() throws Exception {
+    HiveConf conf = new HiveConf();
+    conf.set("hive.support.concurrency", "false");
+    SessionState.start(conf);
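+    // create a partitioned table backed by the CustomInFmt input format defined above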
+    String cmd = "create table fetchOp (id int, name string) " +
+        "partitioned by (state string) " +
+        "row format delimited fields terminated by '|' " +
+        "stored as " +
+        "inputformat 'org.apache.hadoop.hive.ql.exec.TestOperators$CustomInFmt' " +
+        "outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' " +
+        "tblproperties ('myprop1'='val1', 'myprop2' = 'val2')";
+    Driver driver = new Driver();
+    driver.init();
+    CommandProcessorResponse response = driver.run(cmd);
+    assertEquals(0, response.getResponseCode());
+    List<Object> result = new ArrayList<Object>();
+
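+    // load two partitions (state=CA and state=OR) so both directories appear in the dir list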
+    cmd = "load data local inpath '../data/files/employee.dat' " +
+        "overwrite into table fetchOp partition (state='CA')";
+    driver.init();
+    response = driver.run(cmd);
+    assertEquals(0, response.getResponseCode());
+
+    cmd = "load data local inpath '../data/files/employee2.dat' " +
+        "overwrite into table fetchOp partition (state='OR')";
+    driver.init();
+    response = driver.run(cmd);
+    assertEquals(0, response.getResponseCode());
+
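+    // a full-table select runs through the fetch operator, whose context CustomInFmt.getSplits verifies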
+    cmd = "select * from fetchOp";
+    driver.init();
+    driver.setMaxRows(500);
+    response = driver.run(cmd);
+    assertEquals(0, response.getResponseCode());
+    driver.getResults(result);
+    assertEquals(20, result.size());
+    driver.close();
+  }
 }

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Fri Sep 19 23:26:48 2014
@@ -118,7 +118,6 @@ public class TestInputOutputFormat {
     TimeZone gmt = TimeZone.getTimeZone("GMT+0");
     DATE_FORMAT.setTimeZone(gmt);
     TIME_FORMAT.setTimeZone(gmt);
-    TimeZone local = TimeZone.getDefault();
   }
 
   public static class BigRow implements Writable {
@@ -560,6 +559,12 @@ public class TestInputOutputFormat {
       this.file = file;
     }
 
+    /**
+     * Set the blocks and their location for the file.
+     * Must be called after the stream is closed or the block length will be
+     * wrong.
+     * @param blocks the list of blocks
+     */
     public void setBlocks(MockBlock... blocks) {
       file.blocks = blocks;
       int offset = 0;
@@ -580,12 +585,18 @@ public class TestInputOutputFormat {
       file.content = new byte[file.length];
       System.arraycopy(buf.getData(), 0, file.content, 0, file.length);
     }
+
+    @Override
+    public String toString() {
+      return "Out stream to " + file.toString();
+    }
   }
 
   public static class MockFileSystem extends FileSystem {
     final List<MockFile> files = new ArrayList<MockFile>();
     Path workingDir = new Path("/");
 
+    @SuppressWarnings("unused")
     public MockFileSystem() {
       // empty
     }
@@ -620,7 +631,7 @@ public class TestInputOutputFormat {
           return new FSDataInputStream(new MockInputStream(file));
         }
       }
-      return null;
+      throw new IOException("File not found: " + path);
     }
 
     @Override
@@ -743,8 +754,12 @@ public class TestInputOutputFormat {
           for(MockBlock block: file.blocks) {
             if (OrcInputFormat.SplitGenerator.getOverlap(block.offset,
                 block.length, start, len) > 0) {
+              String[] topology = new String[block.hosts.length];
+              for(int i=0; i < topology.length; ++i) {
+                topology[i] = "/rack/ " + block.hosts[i];
+              }
               result.add(new BlockLocation(block.hosts, block.hosts,
-                  block.offset, block.length));
+                  topology, block.offset, block.length));
             }
           }
           return result.toArray(new BlockLocation[result.size()]);
@@ -1209,7 +1224,8 @@ public class TestInputOutputFormat {
                                          Path warehouseDir,
                                          String tableName,
                                          ObjectInspector objectInspector,
-                                         boolean isVectorized
+                                         boolean isVectorized,
+                                         int partitions
                                          ) throws IOException {
     Utilities.clearWorkMap();
     JobConf conf = new JobConf();
@@ -1218,9 +1234,20 @@ public class TestInputOutputFormat {
     conf.set("hive.vectorized.execution.enabled", Boolean.toString(isVectorized));
     conf.set("fs.mock.impl", MockFileSystem.class.getName());
     conf.set("mapred.mapper.class", ExecMapper.class.getName());
-    Path root = new Path(warehouseDir, tableName + "/p=0");
+    Path root = new Path(warehouseDir, tableName);
+    // clean out previous contents
     ((MockFileSystem) root.getFileSystem(conf)).clear();
-    conf.set("mapred.input.dir", root.toString());
+    // build partition strings
+    String[] partPath = new String[partitions];
+    StringBuilder buffer = new StringBuilder();
+    for(int p=0; p < partitions; ++p) {
+      partPath[p] = new Path(root, "p=" + p).toString();
+      if (p != 0) {
+        buffer.append(',');
+      }
+      buffer.append(partPath[p]);
+    }
+    conf.set("mapred.input.dir", buffer.toString());
     StringBuilder columnIds = new StringBuilder();
     StringBuilder columnNames = new StringBuilder();
     StringBuilder columnTypes = new StringBuilder();
@@ -1249,9 +1276,6 @@ public class TestInputOutputFormat {
     tblProps.put("columns.types", columnTypes.toString());
     TableDesc tbl = new TableDesc(OrcInputFormat.class, OrcOutputFormat.class,
         tblProps);
-    LinkedHashMap<String, String> partSpec =
-        new LinkedHashMap<String, String>();
-    PartitionDesc part = new PartitionDesc(tbl, partSpec);
 
     MapWork mapWork = new MapWork();
     mapWork.setVectorMode(isVectorized);
@@ -1260,11 +1284,16 @@ public class TestInputOutputFormat {
         new LinkedHashMap<String, ArrayList<String>>();
     ArrayList<String> aliases = new ArrayList<String>();
     aliases.add(tableName);
-    aliasMap.put(root.toString(), aliases);
-    mapWork.setPathToAliases(aliasMap);
     LinkedHashMap<String, PartitionDesc> partMap =
         new LinkedHashMap<String, PartitionDesc>();
-    partMap.put(root.toString(), part);
+    for(int p=0; p < partitions; ++p) {
+      aliasMap.put(partPath[p], aliases);
+      LinkedHashMap<String, String> partSpec =
+          new LinkedHashMap<String, String>();
+      PartitionDesc part = new PartitionDesc(tbl, partSpec);
+      partMap.put(partPath[p], part);
+    }
+    mapWork.setPathToAliases(aliasMap);
     mapWork.setPathToPartitionInfo(partMap);
     mapWork.setScratchColumnMap(new HashMap<String, Map<String, Integer>>());
     mapWork.setScratchColumnVectorTypes(new HashMap<String,
@@ -1285,6 +1314,7 @@ public class TestInputOutputFormat {
    * @throws Exception
    */
   @Test
+  @SuppressWarnings("unchecked")
   public void testVectorization() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
@@ -1294,7 +1324,7 @@ public class TestInputOutputFormat {
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "vectorization", inspector, true);
+        "vectorization", inspector, true, 1);
 
     // write the orc file to the mock file system
     Writer writer =
@@ -1332,6 +1362,7 @@ public class TestInputOutputFormat {
    * @throws Exception
    */
   @Test
+  @SuppressWarnings("unchecked")
   public void testVectorizationWithBuckets() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
@@ -1341,7 +1372,7 @@ public class TestInputOutputFormat {
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "vectorBuckets", inspector, true);
+        "vectorBuckets", inspector, true, 1);
 
     // write the orc file to the mock file system
     Writer writer =
@@ -1377,10 +1408,11 @@ public class TestInputOutputFormat {
 
   // test acid with vectorization, no combine
   @Test
+  @SuppressWarnings("unchecked")
   public void testVectorizationWithAcid() throws Exception {
     StructObjectInspector inspector = new BigRowInspector();
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "vectorizationAcid", inspector, true);
+        "vectorizationAcid", inspector, true, 1);
 
     // write the orc file to the mock file system
     Path partDir = new Path(conf.get("mapred.input.dir"));
@@ -1444,6 +1476,7 @@ public class TestInputOutputFormat {
 
   // test non-vectorized, non-acid, combine
   @Test
+  @SuppressWarnings("unchecked")
   public void testCombinationInputFormat() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
@@ -1453,7 +1486,7 @@ public class TestInputOutputFormat {
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "combination", inspector, false);
+        "combination", inspector, false, 1);
 
     // write the orc file to the mock file system
     Path partDir = new Path(conf.get("mapred.input.dir"));
@@ -1516,17 +1549,25 @@ public class TestInputOutputFormat {
   public void testCombinationInputFormatWithAcid() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
+    final int PARTITIONS = 2;
+    final int BUCKETS = 3;
     synchronized (TestOrcFile.class) {
       inspector = (StructObjectInspector)
           ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class,
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "combinationAcid", inspector, false);
+        "combinationAcid", inspector, false, PARTITIONS);
 
     // write the orc file to the mock file system
-    Path partDir = new Path(conf.get("mapred.input.dir"));
-    OrcRecordUpdater writer = new OrcRecordUpdater(partDir,
+    Path[] partDir = new Path[PARTITIONS];
+    String[] paths = conf.getStrings("mapred.input.dir");
+    for(int p=0; p < PARTITIONS; ++p) {
+      partDir[p] = new Path(paths[p]);
+    }
+
+    // write a base file in partition 0
+    OrcRecordUpdater writer = new OrcRecordUpdater(partDir[0],
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
             .writingBase(true).bucket(0).inspector(inspector));
     for(int i=0; i < 10; ++i) {
@@ -1534,31 +1575,68 @@ public class TestInputOutputFormat {
     }
     WriterImpl baseWriter = (WriterImpl) writer.getWriter();
     writer.close(false);
+
     MockOutputStream outputStream = (MockOutputStream) baseWriter.getStream();
-    int length0 = outputStream.file.length;
-    writer = new OrcRecordUpdater(partDir,
+    outputStream.setBlocks(new MockBlock("host1", "host2"));
+
+    // write a second base file (bucket 1) in partition 0
+    writer = new OrcRecordUpdater(partDir[0],
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
             .writingBase(true).bucket(1).inspector(inspector));
     for(int i=10; i < 20; ++i) {
       writer.insert(10, new MyRow(i, 2*i));
     }
-    baseWriter = (WriterImpl) writer.getWriter();
+    WriterImpl deltaWriter = (WriterImpl) writer.getWriter();
+    outputStream = (MockOutputStream) deltaWriter.getStream();
     writer.close(false);
-    outputStream = (MockOutputStream) baseWriter.getStream();
     outputStream.setBlocks(new MockBlock("host1", "host2"));
 
+    // write three files in partition 1
+    for(int bucket=0; bucket < BUCKETS; ++bucket) {
+      Writer orc = OrcFile.createWriter(
+          new Path(partDir[1], "00000" + bucket + "_0"),
+          OrcFile.writerOptions(conf)
+              .blockPadding(false)
+              .bufferSize(1024)
+              .inspector(inspector));
+      orc.addRow(new MyRow(1, 2));
+      outputStream = (MockOutputStream) ((WriterImpl) orc).getStream();
+      orc.close();
+      outputStream.setBlocks(new MockBlock("host3", "host4"));
+    }
+
     // call getsplits
+    conf.setInt(hive_metastoreConstants.BUCKET_COUNT, BUCKETS);
     HiveInputFormat<?,?> inputFormat =
         new CombineHiveInputFormat<WritableComparable, Writable>();
-    try {
-      InputSplit[] splits = inputFormat.getSplits(conf, 1);
-      assertTrue("shouldn't reach here", false);
-    } catch (IOException ioe) {
-      assertEquals("CombineHiveInputFormat is incompatible"
-          + "  with ACID tables. Please set hive.input.format=org.apache.hadoop"
-          + ".hive.ql.io.HiveInputFormat",
-          ioe.getMessage());
+    InputSplit[] splits = inputFormat.getSplits(conf, 1);
+    assertEquals(3, splits.length);
+    HiveInputFormat.HiveInputSplit split =
+        (HiveInputFormat.HiveInputSplit) splits[0];
+    assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
+        split.inputFormatClassName());
+    assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000",
+        split.getPath().toString());
+    assertEquals(0, split.getStart());
+    assertEquals(580, split.getLength());
+    split = (HiveInputFormat.HiveInputSplit) splits[1];
+    assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
+        split.inputFormatClassName());
+    assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001",
+        split.getPath().toString());
+    assertEquals(0, split.getStart());
+    assertEquals(601, split.getLength());
+    CombineHiveInputFormat.CombineHiveInputSplit combineSplit =
+        (CombineHiveInputFormat.CombineHiveInputSplit) splits[2];
+    assertEquals(BUCKETS, combineSplit.getNumPaths());
+    for(int bucket=0; bucket < BUCKETS; ++bucket) {
+      assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0",
+          combineSplit.getPath(bucket).toString());
+      assertEquals(0, combineSplit.getOffset(bucket));
+      assertEquals(227, combineSplit.getLength(bucket));
     }
+    String[] hosts = combineSplit.getLocations();
+    assertEquals(2, hosts.length);
   }
 
   @Test

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java Fri Sep 19 23:26:48 2014
@@ -25,6 +25,8 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.DisallowTransformHook;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.Builder;
@@ -77,8 +79,9 @@ public class TestSQLStdHiveAccessControl
     HiveConf processedConf = new HiveConf();
     processedConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
     try {
-      SQLStdHiveAccessController accessController = new SQLStdHiveAccessController(null,
-          processedConf, new HadoopDefaultAuthenticator(), getCLISessionCtx());
+      HiveAuthorizerFactory authorizerFactory = new SQLStdHiveAuthorizerFactory();
+      HiveAuthorizer authorizer = authorizerFactory.createHiveAuthorizer(null, processedConf,
+          new HadoopDefaultAuthenticator(), getCLISessionCtx());
       fail("Exception expected");
     } catch (HiveAuthzPluginException e) {
       assertTrue(e.getMessage().contains(

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_createtab.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_createtab.q?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_createtab.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_cli_createtab.q Fri Sep 19 23:26:48 2014
@@ -1,6 +1,5 @@
-set hive.test.authz.sstd.hs2.mode=true;
 set hive.users.in.admin.role=hive_admin_user;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_test_user;
 

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q Fri Sep 19 23:26:48 2014
@@ -46,6 +46,8 @@ explain 
 select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
 select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
 
+select count(*) from stats_tbl_part;
+select count(*)/2 from stats_tbl_part;
 drop table stats_tbl_part;
 set hive.compute.query.using.stats=false;
 set hive.stats.dbclass=jdbc:derby;

Modified: hive/branches/cbo/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out?rev=1626364&r1=1626363&r2=1626364&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out Fri Sep 19 23:26:48 2014
@@ -188,6 +188,26 @@ POSTHOOK: query: select count(*), count(
 POSTHOOK: type: QUERY
 #### A masked pattern was here ####
 2219	2219	2219	4438	2219	2219	2219	2219	65791	4294967296	99.95999908447266	0.04
+PREHOOK: query: select count(*) from stats_tbl_part
+PREHOOK: type: QUERY
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_tbl_part
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+4541
+PREHOOK: query: select count(*)/2 from stats_tbl_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+PREHOOK: Input: default@stats_tbl_part@dt=2010
+PREHOOK: Input: default@stats_tbl_part@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)/2 from stats_tbl_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+POSTHOOK: Input: default@stats_tbl_part@dt=2010
+POSTHOOK: Input: default@stats_tbl_part@dt=2014
+#### A masked pattern was here ####
+2270.5
 PREHOOK: query: drop table stats_tbl_part
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@stats_tbl_part