Posted to commits@hive.apache.org by br...@apache.org on 2014/09/20 19:34:43 UTC

svn commit: r1626482 [4/6] - in /hive/branches/spark: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/ common/src/java/org/apache/hadoop/hive/conf/ data/files/ hcatalog/hc...

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java Sat Sep 20 17:34:39 2014
@@ -50,6 +50,7 @@ import org.apache.hadoop.hive.ql.plan.Ag
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
@@ -601,6 +602,30 @@ public class TestVectorGroupByOperator {
   }
 
   @Test
+  public void testCountReduce() throws HiveException {
+    testAggregateCountReduce(
+            2,
+            Arrays.asList(new Long[]{}),
+            0L);
+    testAggregateCountReduce(
+            2,
+            Arrays.asList(new Long[]{0L}),
+            0L);
+    testAggregateCountReduce(
+            2,
+            Arrays.asList(new Long[]{0L,0L}),
+            0L);
+    testAggregateCountReduce(
+            2,
+            Arrays.asList(new Long[]{0L,1L,0L}),
+            1L);
+    testAggregateCountReduce(
+            2,
+            Arrays.asList(new Long[]{13L,0L,7L,19L}),
+            39L);
+  }
+
+  @Test
   public void testCountDecimal() throws HiveException {
     testAggregateDecimal(
         "Decimal",
@@ -1210,7 +1235,7 @@ public class TestVectorGroupByOperator {
         "count",
         2,
         Arrays.asList(new Long[]{}),
-        null);
+        0L);
   }
 
   @Test
@@ -2027,6 +2052,17 @@ public class TestVectorGroupByOperator {
     testAggregateCountStarIterable (fdr, expected);
   }
 
+  public void testAggregateCountReduce (
+      int batchSize,
+      Iterable<Long> values,
+      Object expected) throws HiveException {
+
+    @SuppressWarnings("unchecked")
+    FakeVectorRowBatchFromLongIterables fdr = new FakeVectorRowBatchFromLongIterables(batchSize,
+        values);
+    testAggregateCountReduceIterable (fdr, expected);
+  }
+
 
   public static interface Validator {
     void validate (String key, Object expected, Object result);
@@ -2223,6 +2259,37 @@ public class TestVectorGroupByOperator {
     validator.validate("_total", expected, result);
   }
 
+  public void testAggregateCountReduceIterable (
+      Iterable<VectorizedRowBatch> data,
+      Object expected) throws HiveException {
+    Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
+    mapColumnNames.put("A", 0);
+    VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+
+    GroupByDesc desc = buildGroupByDescType(ctx, "count", "A", TypeInfoFactory.longTypeInfo);
+    VectorGroupByDesc vectorDesc = desc.getVectorDesc();
+    vectorDesc.setIsReduce(true);
+
+    VectorGroupByOperator vgo = new VectorGroupByOperator(ctx, desc);
+
+    FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(vgo);
+    vgo.initialize(null, null);
+
+    for (VectorizedRowBatch unit: data) {
+      vgo.processOp(unit, 0);
+    }
+    vgo.close(false);
+
+    List<Object> outBatchList = out.getCapturedRows();
+    assertNotNull(outBatchList);
+    assertEquals(1, outBatchList.size());
+
+    Object result = outBatchList.get(0);
+
+    Validator validator = getValidator("count");
+    validator.validate("_total", expected, result);
+  }
+
   public void testAggregateStringIterable (
       String aggregateName,
       Iterable<VectorizedRowBatch> data,

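The behavioral change these hunks pin down: in reduce mode, a vectorized COUNT over empty input now returns 0L where it previously returned null, and map-side partial counts are summed on the reduce side. A minimal stand-alone sketch of that accumulation, reusing the test's own data (the class and loop below are illustrative, not the operator's code):

    import java.util.Arrays;
    import java.util.List;

    public class CountReduceSketch {
      public static void main(String[] args) {
        // Map-side partial counts; the reduce side sums them.
        List<Long> partials = Arrays.asList(13L, 0L, 7L, 19L);
        long total = 0L;  // starts at 0, so empty input yields 0, not null
        for (Long partial : partials) {
          total += partial;
        }
        System.out.println(total);  // 39, the value asserted above
      }
    }
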
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Sat Sep 20 17:34:39 2014
@@ -118,7 +118,6 @@ public class TestInputOutputFormat {
     TimeZone gmt = TimeZone.getTimeZone("GMT+0");
     DATE_FORMAT.setTimeZone(gmt);
     TIME_FORMAT.setTimeZone(gmt);
-    TimeZone local = TimeZone.getDefault();
   }
 
   public static class BigRow implements Writable {
@@ -560,6 +559,12 @@ public class TestInputOutputFormat {
       this.file = file;
     }
 
+    /**
+     * Set the blocks and their locations for the file.
+     * Must be called after the stream is closed or the block length will be
+     * wrong.
+     * @param blocks the list of blocks
+     */
     public void setBlocks(MockBlock... blocks) {
       file.blocks = blocks;
       int offset = 0;
@@ -580,12 +585,18 @@ public class TestInputOutputFormat {
       file.content = new byte[file.length];
       System.arraycopy(buf.getData(), 0, file.content, 0, file.length);
     }
+
+    @Override
+    public String toString() {
+      return "Out stream to " + file.toString();
+    }
   }
 
   public static class MockFileSystem extends FileSystem {
     final List<MockFile> files = new ArrayList<MockFile>();
     Path workingDir = new Path("/");
 
+    @SuppressWarnings("unused")
     public MockFileSystem() {
       // empty
     }
@@ -620,7 +631,7 @@ public class TestInputOutputFormat {
           return new FSDataInputStream(new MockInputStream(file));
         }
       }
-      return null;
+      throw new IOException("File not found: " + path);
     }
 
     @Override
@@ -743,8 +754,12 @@ public class TestInputOutputFormat {
           for(MockBlock block: file.blocks) {
             if (OrcInputFormat.SplitGenerator.getOverlap(block.offset,
                 block.length, start, len) > 0) {
+              String[] topology = new String[block.hosts.length];
+              for(int i=0; i < topology.length; ++i) {
+                topology[i] = "/rack/ " + block.hosts[i];
+              }
               result.add(new BlockLocation(block.hosts, block.hosts,
-                  block.offset, block.length));
+                  topology, block.offset, block.length));
             }
           }
           return result.toArray(new BlockLocation[result.size()]);
@@ -1209,7 +1224,8 @@ public class TestInputOutputFormat {
                                          Path warehouseDir,
                                          String tableName,
                                          ObjectInspector objectInspector,
-                                         boolean isVectorized
+                                         boolean isVectorized,
+                                         int partitions
                                          ) throws IOException {
     Utilities.clearWorkMap();
     JobConf conf = new JobConf();
@@ -1218,9 +1234,20 @@ public class TestInputOutputFormat {
     conf.set("hive.vectorized.execution.enabled", Boolean.toString(isVectorized));
     conf.set("fs.mock.impl", MockFileSystem.class.getName());
     conf.set("mapred.mapper.class", ExecMapper.class.getName());
-    Path root = new Path(warehouseDir, tableName + "/p=0");
+    Path root = new Path(warehouseDir, tableName);
+    // clean out previous contents
     ((MockFileSystem) root.getFileSystem(conf)).clear();
-    conf.set("mapred.input.dir", root.toString());
+    // build partition strings
+    String[] partPath = new String[partitions];
+    StringBuilder buffer = new StringBuilder();
+    for(int p=0; p < partitions; ++p) {
+      partPath[p] = new Path(root, "p=" + p).toString();
+      if (p != 0) {
+        buffer.append(',');
+      }
+      buffer.append(partPath[p]);
+    }
+    conf.set("mapred.input.dir", buffer.toString());
     StringBuilder columnIds = new StringBuilder();
     StringBuilder columnNames = new StringBuilder();
     StringBuilder columnTypes = new StringBuilder();
@@ -1249,9 +1276,6 @@ public class TestInputOutputFormat {
     tblProps.put("columns.types", columnTypes.toString());
     TableDesc tbl = new TableDesc(OrcInputFormat.class, OrcOutputFormat.class,
         tblProps);
-    LinkedHashMap<String, String> partSpec =
-        new LinkedHashMap<String, String>();
-    PartitionDesc part = new PartitionDesc(tbl, partSpec);
 
     MapWork mapWork = new MapWork();
     mapWork.setVectorMode(isVectorized);
@@ -1260,11 +1284,16 @@ public class TestInputOutputFormat {
         new LinkedHashMap<String, ArrayList<String>>();
     ArrayList<String> aliases = new ArrayList<String>();
     aliases.add(tableName);
-    aliasMap.put(root.toString(), aliases);
-    mapWork.setPathToAliases(aliasMap);
     LinkedHashMap<String, PartitionDesc> partMap =
         new LinkedHashMap<String, PartitionDesc>();
-    partMap.put(root.toString(), part);
+    for(int p=0; p < partitions; ++p) {
+      aliasMap.put(partPath[p], aliases);
+      LinkedHashMap<String, String> partSpec =
+          new LinkedHashMap<String, String>();
+      PartitionDesc part = new PartitionDesc(tbl, partSpec);
+      partMap.put(partPath[p], part);
+    }
+    mapWork.setPathToAliases(aliasMap);
     mapWork.setPathToPartitionInfo(partMap);
     mapWork.setScratchColumnMap(new HashMap<String, Map<String, Integer>>());
     mapWork.setScratchColumnVectorTypes(new HashMap<String,
@@ -1285,6 +1314,7 @@ public class TestInputOutputFormat {
    * @throws Exception
    */
   @Test
+  @SuppressWarnings("unchecked")
   public void testVectorization() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
@@ -1294,7 +1324,7 @@ public class TestInputOutputFormat {
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "vectorization", inspector, true);
+        "vectorization", inspector, true, 1);
 
     // write the orc file to the mock file system
     Writer writer =
@@ -1332,6 +1362,7 @@ public class TestInputOutputFormat {
    * @throws Exception
    */
   @Test
+  @SuppressWarnings("unchecked")
   public void testVectorizationWithBuckets() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
@@ -1341,7 +1372,7 @@ public class TestInputOutputFormat {
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "vectorBuckets", inspector, true);
+        "vectorBuckets", inspector, true, 1);
 
     // write the orc file to the mock file system
     Writer writer =
@@ -1377,10 +1408,11 @@ public class TestInputOutputFormat {
 
   // test acid with vectorization, no combine
   @Test
+  @SuppressWarnings("unchecked")
   public void testVectorizationWithAcid() throws Exception {
     StructObjectInspector inspector = new BigRowInspector();
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "vectorizationAcid", inspector, true);
+        "vectorizationAcid", inspector, true, 1);
 
     // write the orc file to the mock file system
     Path partDir = new Path(conf.get("mapred.input.dir"));
@@ -1444,6 +1476,7 @@ public class TestInputOutputFormat {
 
   // test non-vectorized, non-acid, combine
   @Test
+  @SuppressWarnings("unchecked")
   public void testCombinationInputFormat() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
@@ -1453,7 +1486,7 @@ public class TestInputOutputFormat {
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "combination", inspector, false);
+        "combination", inspector, false, 1);
 
     // write the orc file to the mock file system
     Path partDir = new Path(conf.get("mapred.input.dir"));
@@ -1516,17 +1549,25 @@ public class TestInputOutputFormat {
   public void testCombinationInputFormatWithAcid() throws Exception {
     // get the object inspector for MyRow
     StructObjectInspector inspector;
+    final int PARTITIONS = 2;
+    final int BUCKETS = 3;
     synchronized (TestOrcFile.class) {
       inspector = (StructObjectInspector)
           ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class,
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
     JobConf conf = createMockExecutionEnvironment(workDir, new Path("mock:///"),
-        "combinationAcid", inspector, false);
+        "combinationAcid", inspector, false, PARTITIONS);
 
     // write the orc file to the mock file system
-    Path partDir = new Path(conf.get("mapred.input.dir"));
-    OrcRecordUpdater writer = new OrcRecordUpdater(partDir,
+    Path[] partDir = new Path[PARTITIONS];
+    String[] paths = conf.getStrings("mapred.input.dir");
+    for(int p=0; p < PARTITIONS; ++p) {
+      partDir[p] = new Path(paths[p]);
+    }
+
+    // write a base file in partition 0
+    OrcRecordUpdater writer = new OrcRecordUpdater(partDir[0],
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
             .writingBase(true).bucket(0).inspector(inspector));
     for(int i=0; i < 10; ++i) {
@@ -1534,31 +1575,68 @@ public class TestInputOutputFormat {
     }
     WriterImpl baseWriter = (WriterImpl) writer.getWriter();
     writer.close(false);
+
     MockOutputStream outputStream = (MockOutputStream) baseWriter.getStream();
-    int length0 = outputStream.file.length;
-    writer = new OrcRecordUpdater(partDir,
+    outputStream.setBlocks(new MockBlock("host1", "host2"));
+
+    // write a second base file (bucket 1) in partition 0
+    writer = new OrcRecordUpdater(partDir[0],
         new AcidOutputFormat.Options(conf).maximumTransactionId(10)
             .writingBase(true).bucket(1).inspector(inspector));
     for(int i=10; i < 20; ++i) {
       writer.insert(10, new MyRow(i, 2*i));
     }
-    baseWriter = (WriterImpl) writer.getWriter();
+    WriterImpl secondBaseWriter = (WriterImpl) writer.getWriter();
+    outputStream = (MockOutputStream) secondBaseWriter.getStream();
     writer.close(false);
-    outputStream = (MockOutputStream) baseWriter.getStream();
     outputStream.setBlocks(new MockBlock("host1", "host2"));
 
+    // write three files in partition 1
+    for(int bucket=0; bucket < BUCKETS; ++bucket) {
+      Writer orc = OrcFile.createWriter(
+          new Path(partDir[1], "00000" + bucket + "_0"),
+          OrcFile.writerOptions(conf)
+              .blockPadding(false)
+              .bufferSize(1024)
+              .inspector(inspector));
+      orc.addRow(new MyRow(1, 2));
+      outputStream = (MockOutputStream) ((WriterImpl) orc).getStream();
+      orc.close();
+      outputStream.setBlocks(new MockBlock("host3", "host4"));
+    }
+
     // call getsplits
+    conf.setInt(hive_metastoreConstants.BUCKET_COUNT, BUCKETS);
     HiveInputFormat<?,?> inputFormat =
         new CombineHiveInputFormat<WritableComparable, Writable>();
-    try {
-      InputSplit[] splits = inputFormat.getSplits(conf, 1);
-      assertTrue("shouldn't reach here", false);
-    } catch (IOException ioe) {
-      assertEquals("CombineHiveInputFormat is incompatible"
-          + "  with ACID tables. Please set hive.input.format=org.apache.hadoop"
-          + ".hive.ql.io.HiveInputFormat",
-          ioe.getMessage());
+    InputSplit[] splits = inputFormat.getSplits(conf, 1);
+    assertEquals(3, splits.length);
+    HiveInputFormat.HiveInputSplit split =
+        (HiveInputFormat.HiveInputSplit) splits[0];
+    assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
+        split.inputFormatClassName());
+    assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000",
+        split.getPath().toString());
+    assertEquals(0, split.getStart());
+    assertEquals(580, split.getLength());
+    split = (HiveInputFormat.HiveInputSplit) splits[1];
+    assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
+        split.inputFormatClassName());
+    assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001",
+        split.getPath().toString());
+    assertEquals(0, split.getStart());
+    assertEquals(601, split.getLength());
+    CombineHiveInputFormat.CombineHiveInputSplit combineSplit =
+        (CombineHiveInputFormat.CombineHiveInputSplit) splits[2];
+    assertEquals(BUCKETS, combineSplit.getNumPaths());
+    for(int bucket=0; bucket < BUCKETS; ++bucket) {
+      assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0",
+          combineSplit.getPath(bucket).toString());
+      assertEquals(0, combineSplit.getOffset(bucket));
+      assertEquals(227, combineSplit.getLength(bucket));
     }
+    String[] hosts = combineSplit.getLocations();
+    assertEquals(2, hosts.length);
   }
 
   @Test

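Two mock-environment changes above deserve a note: createMockExecutionEnvironment now takes a partition count and joins the partition directories into a comma-separated "mapred.input.dir", and MockFileSystem.open now throws IOException for a missing file instead of returning null. A minimal sketch of the path assembly, using only Hadoop's Path and JobConf (the root URI and partition count here are illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;

    public class PartitionPathSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        Path root = new Path("mock:///combinationAcid");  // illustrative root
        int partitions = 2;
        StringBuilder buffer = new StringBuilder();
        for (int p = 0; p < partitions; ++p) {
          if (p != 0) {
            buffer.append(',');  // comma-separate the partition paths
          }
          buffer.append(new Path(root, "p=" + p).toString());
        }
        conf.set("mapred.input.dir", buffer.toString());
        System.out.println(conf.get("mapred.input.dir"));
        // e.g. mock:/combinationAcid/p=0,mock:/combinationAcid/p=1
      }
    }
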
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java Sat Sep 20 17:34:39 2014
@@ -25,6 +25,7 @@ import java.util.Collections;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
+import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.junit.Assert;
@@ -87,14 +88,14 @@ public class TestZookeeperLockManager {
   public void testGetQuorumServers() {
     conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, "node1");
     conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT, "9999");
-    Assert.assertEquals("node1:9999", ZooKeeperHiveLockManager.getQuorumServers(conf));
+    Assert.assertEquals("node1:9999", ZooKeeperHiveHelper.getQuorumServers(conf));
 
     conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, "node1,node2,node3");
     conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT, "9999");
-    Assert.assertEquals("node1:9999,node2:9999,node3:9999", ZooKeeperHiveLockManager.getQuorumServers(conf));
+    Assert.assertEquals("node1:9999,node2:9999,node3:9999", ZooKeeperHiveHelper.getQuorumServers(conf));
 
     conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, "node1:5666,node2,node3");
     conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT, "9999");
-    Assert.assertEquals("node1:5666,node2:9999,node3:9999", ZooKeeperHiveLockManager.getQuorumServers(conf));
+    Assert.assertEquals("node1:5666,node2:9999,node3:9999", ZooKeeperHiveHelper.getQuorumServers(conf));
   }
 }

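The behavior this test now pins on ZooKeeperHiveHelper: append the configured client port to every quorum node that does not already carry an explicit one. A stand-alone sketch of that rule, inferred from the expected strings above rather than copied from the helper:

    public class QuorumSketch {
      // Append defaultPort to each node that lacks an explicit port.
      static String quorum(String nodes, String defaultPort) {
        StringBuilder sb = new StringBuilder();
        for (String node : nodes.split(",")) {
          if (sb.length() > 0) {
            sb.append(',');
          }
          sb.append(node);
          if (!node.contains(":")) {
            sb.append(':').append(defaultPort);
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // Matches the third assertion: an explicit port wins over the default.
        System.out.println(quorum("node1:5666,node2,node3", "9999"));
        // prints: node1:5666,node2:9999,node3:9999
      }
    }
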
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java Sat Sep 20 17:34:39 2014
@@ -198,7 +198,7 @@ public class TestUpdateDeleteSemanticAna
   @Test
   public void testInsertValues() throws Exception {
     try {
-      ReturnInfo rc = parseAndAnalyze("insert into table T values ('abc', 3), ('ghi', 5)",
+      ReturnInfo rc = parseAndAnalyze("insert into table T values ('abc', 3), ('ghi', null)",
           "testInsertValues");
 
       LOG.info(explain((SemanticAnalyzer)rc.sem, rc.plan, rc.ast.dump()));

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java Sat Sep 20 17:34:39 2014
@@ -25,6 +25,8 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.DisallowTransformHook;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.Builder;
@@ -77,8 +79,9 @@ public class TestSQLStdHiveAccessControl
     HiveConf processedConf = new HiveConf();
     processedConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
     try {
-      SQLStdHiveAccessController accessController = new SQLStdHiveAccessController(null,
-          processedConf, new HadoopDefaultAuthenticator(), getCLISessionCtx());
+      HiveAuthorizerFactory authorizerFactory = new SQLStdHiveAuthorizerFactory();
+      HiveAuthorizer authorizer = authorizerFactory.createHiveAuthorizer(null, processedConf,
+          new HadoopDefaultAuthenticator(), getCLISessionCtx());
       fail("Exception expected");
     } catch (HiveAuthzPluginException e) {
       assertTrue(e.getMessage().contains(

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/authorization_cli_createtab.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/authorization_cli_createtab.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/authorization_cli_createtab.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/authorization_cli_createtab.q Sat Sep 20 17:34:39 2014
@@ -1,6 +1,5 @@
-set hive.test.authz.sstd.hs2.mode=true;
 set hive.users.in.admin.role=hive_admin_user;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_test_user;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.exec.reducers.max = 1;
 
 create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_all_partitioned.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_where_partitioned.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/delete_whole_partition.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/drop_index.q Sat Sep 20 17:34:39 2014
@@ -1,2 +1,3 @@
 SET hive.exec.drop.ignorenonexistent=false;
 DROP INDEX IF EXISTS UnknownIndex ON src;
+DROP INDEX IF EXISTS UnknownIndex ON UnknownTable;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_update_delete.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q Sat Sep 20 17:34:39 2014
@@ -3,7 +3,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table ivdp(i int,
                  de decimal(5,2),

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q Sat Sep 20 17:34:39 2014
@@ -12,12 +12,14 @@ create table acid_ivnp(ti tinyint,
                  de decimal(5,2),
                  t timestamp,
                  dt date,
+                 b boolean,
                  s string,
                  vc varchar(128),
                  ch char(12)) clustered by (i) into 2 buckets stored as orc;
 
 insert into table acid_ivnp values 
-    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
-    (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' );
+    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+    (null, null, null, null, null, null, null, null, null, null, null, null, null),
+    (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' );
 
-select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti;
+select * from acid_ivnp order by ti;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/insert_values_partitioned.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.exec.dynamic.partition.mode=nonstrict;
 
 create table acid_ivp(ti tinyint,
                  si smallint,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q Sat Sep 20 17:34:39 2014
@@ -46,6 +46,8 @@ explain 
 select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
 select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
 
+select count(*) from stats_tbl_part;
+select count(*)/2 from stats_tbl_part;
 drop table stats_tbl_part;
 set hive.compute.query.using.stats=false;
 set hive.stats.dbclass=jdbc:derby;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q Sat Sep 20 17:34:39 2014
@@ -3,7 +3,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_uami(i int,
                  de decimal(5,2),

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_all_partitioned.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/update_where_partitioned.q Sat Sep 20 17:34:39 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;
 
 create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_short_regress.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_short_regress.q?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_short_regress.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_short_regress.q Sat Sep 20 17:34:39 2014
@@ -850,3 +850,52 @@ WHERE    (((cboolean1 IS NOT NULL))
 GROUP BY cboolean1
 ORDER BY cboolean1;
 
+-- These tests verify that COUNT on empty or null columns works correctly.
+create table test_count(i int) stored as orc;
+
+explain
+select count(*) from test_count;
+
+select count(*) from test_count;
+
+explain
+select count(i) from test_count;
+
+select count(i) from test_count;
+
+create table alltypesnull like alltypesorc;
+alter table alltypesnull set fileformat textfile;
+
+insert into table alltypesnull select null, null, null, null, null, null, null, null, null, null, null, null from alltypesorc;
+
+create table alltypesnullorc stored as orc as select * from alltypesnull;
+
+explain
+select count(*) from alltypesnullorc;
+
+select count(*) from alltypesnullorc;
+
+explain
+select count(ctinyint) from alltypesnullorc;
+
+select count(ctinyint) from alltypesnullorc;
+
+explain
+select count(cint) from alltypesnullorc;
+
+select count(cint) from alltypesnullorc;
+
+explain
+select count(cfloat) from alltypesnullorc;
+
+select count(cfloat) from alltypesnullorc;
+
+explain
+select count(cstring1) from alltypesnullorc;
+
+select count(cstring1) from alltypesnullorc;
+
+explain
+select count(cboolean1) from alltypesnullorc;
+
+select count(cboolean1) from alltypesnullorc;

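The distinction these queries probe: count(*) counts rows while count(col) counts only non-null values, and both must come back correct from a vectorized scan of an empty or all-null ORC table. A language-neutral sketch of the two semantics (illustrative Java, not Hive's vectorized implementation):

    import java.util.Arrays;
    import java.util.List;

    public class CountSemanticsSketch {
      public static void main(String[] args) {
        // An all-null column, like every column of alltypesnullorc.
        List<Integer> cint = Arrays.asList(null, null, null);
        long countStar = cint.size();  // count(*): counts every row -> 3
        long countCol = cint.stream().filter(v -> v != null).count();  // count(cint) -> 0
        System.out.println(countStar + " " + countCol);  // prints: 3 0
      }
    }
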
Modified: hive/branches/spark/ql/src/test/results/clientpositive/drop_index.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/drop_index.q.out?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/drop_index.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/drop_index.q.out Sat Sep 20 17:34:39 2014
@@ -4,3 +4,7 @@ PREHOOK: Input: default@src
 POSTHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON src
 POSTHOOK: type: DROPINDEX
 POSTHOOK: Input: default@src
+PREHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON UnknownTable
+PREHOOK: type: DROPINDEX
+POSTHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON UnknownTable
+POSTHOOK: type: DROPINDEX

Modified: hive/branches/spark/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out Sat Sep 20 17:34:39 2014
@@ -7,6 +7,7 @@ PREHOOK: query: create table acid_ivnp(t
                  de decimal(5,2),
                  t timestamp,
                  dt date,
+                 b boolean,
                  s string,
                  vc varchar(128),
                  ch char(12)) clustered by (i) into 2 buckets stored as orc
@@ -22,6 +23,7 @@ POSTHOOK: query: create table acid_ivnp(
                  de decimal(5,2),
                  t timestamp,
                  dt date,
+                 b boolean,
                  s string,
                  vc varchar(128),
                  ch char(12)) clustered by (i) into 2 buckets stored as orc
@@ -29,36 +31,40 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivnp
 PREHOOK: query: insert into table acid_ivnp values 
-    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
-    (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
+    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+    (null, null, null, null, null, null, null, null, null, null, null, null, null),
+    (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@acid_ivnp
 POSTHOOK: query: insert into table acid_ivnp values 
-    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
-    (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
+    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+    (null, null, null, null, null, null, null, null, null, null, null, null, null),
+    (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@acid_ivnp
+POSTHOOK: Lineage: acid_ivnp.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.bi EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-POSTHOOK: Lineage: acid_ivnp.ch EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.ch EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col13, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col7, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.dt EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col9, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.f EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: acid_ivnp.s SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.s SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.si EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.t EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col8, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.ti EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: acid_ivnp.vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
-PREHOOK: query: select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti
+POSTHOOK: Lineage: acid_ivnp.vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+PREHOOK: query: select * from acid_ivnp order by ti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivnp
 #### A masked pattern was here ####
-POSTHOOK: query: select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti
+POSTHOOK: query: select * from acid_ivnp order by ti
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_ivnp
 #### A masked pattern was here ####
-1	257	65537	4294967297	3.14	3.141592654	109.23	2014-08-25 17:21:30	2014-08-25	mary had a little lamb	ring around the rosie	red         
-3	25	6553	429496729	0.14	1923.141592654	1.23	2014-08-24 17:21:30	2014-08-26	its fleece was white as snow	a pocket full of posies	blue        
+NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
+1	257	65537	4294967297	3.14	3.141592654	109.23	2014-08-25 17:21:30	2014-08-25	true	mary had a little lamb	ring around the rosie	red         
+3	25	6553	NULL	0.14	1923.141592654	1.23	2014-08-24 17:21:30	2014-08-26	false	its fleece was white as snow	a pocket full of posies	blue        

Modified: hive/branches/spark/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out Sat Sep 20 17:34:39 2014
@@ -188,6 +188,26 @@ POSTHOOK: query: select count(*), count(
 POSTHOOK: type: QUERY
 #### A masked pattern was here ####
 2219	2219	2219	4438	2219	2219	2219	2219	65791	4294967296	99.95999908447266	0.04
+PREHOOK: query: select count(*) from stats_tbl_part
+PREHOOK: type: QUERY
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_tbl_part
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+4541
+PREHOOK: query: select count(*)/2 from stats_tbl_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+PREHOOK: Input: default@stats_tbl_part@dt=2010
+PREHOOK: Input: default@stats_tbl_part@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)/2 from stats_tbl_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+POSTHOOK: Input: default@stats_tbl_part@dt=2010
+POSTHOOK: Input: default@stats_tbl_part@dt=2014
+#### A masked pattern was here ####
+2270.5
 PREHOOK: query: drop table stats_tbl_part
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@stats_tbl_part

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out Sat Sep 20 17:34:39 2014
@@ -7,6 +7,7 @@ PREHOOK: query: create table acid_ivnp(t
                  de decimal(5,2),
                  t timestamp,
                  dt date,
+                 b boolean,
                  s string,
                  vc varchar(128),
                  ch char(12)) clustered by (i) into 2 buckets stored as orc
@@ -22,6 +23,7 @@ POSTHOOK: query: create table acid_ivnp(
                  de decimal(5,2),
                  t timestamp,
                  dt date,
+                 b boolean,
                  s string,
                  vc varchar(128),
                  ch char(12)) clustered by (i) into 2 buckets stored as orc
@@ -29,36 +31,40 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivnp
 PREHOOK: query: insert into table acid_ivnp values 
-    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
-    (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
+    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+    (null, null, null, null, null, null, null, null, null, null, null, null, null),
+    (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@acid_ivnp
 POSTHOOK: query: insert into table acid_ivnp values 
-    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
-    (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
+    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+    (null, null, null, null, null, null, null, null, null, null, null, null, null),
+    (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@acid_ivnp
+POSTHOOK: Lineage: acid_ivnp.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.bi EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-POSTHOOK: Lineage: acid_ivnp.ch EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.ch EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col13, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col7, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.dt EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col9, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.f EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: acid_ivnp.s SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.s SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.si EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.t EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col8, type:string, comment:), ]
 POSTHOOK: Lineage: acid_ivnp.ti EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: acid_ivnp.vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
-PREHOOK: query: select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti
+POSTHOOK: Lineage: acid_ivnp.vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+PREHOOK: query: select * from acid_ivnp order by ti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivnp
 #### A masked pattern was here ####
-POSTHOOK: query: select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti
+POSTHOOK: query: select * from acid_ivnp order by ti
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_ivnp
 #### A masked pattern was here ####
-1	257	65537	4294967297	3.14	3.141592654	109.23	2014-08-25 17:21:30	2014-08-25	mary had a little lamb	ring around the rosie	red         
-3	25	6553	429496729	0.14	1923.141592654	1.23	2014-08-24 17:21:30	2014-08-26	its fleece was white as snow	a pocket full of posies	blue        
+NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
+1	257	65537	4294967297	3.14	3.141592654	109.23	2014-08-25 17:21:30	2014-08-25	true	mary had a little lamb	ring around the rosie	red         
+3	25	6553	NULL	0.14	1923.141592654	1.23	2014-08-24 17:21:30	2014-08-26	false	its fleece was white as snow	a pocket full of posies	blue        

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out?rev=1626482&r1=1626481&r2=1626482&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out Sat Sep 20 17:34:39 2014
@@ -6618,3 +6618,623 @@ POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 false	11.0	-11.0	-2.389090909090909	-17881597706	-1.7881597716175E10	3.8953387713327066E17	6.0	-0.8249999999999993	-2454.8879999999995	3.8953385925167296E17	-2145884705	1.66288903197104486E18	0.8249999999999993	4.7840233756130287E-17	4.098424268084119E-17	0.8249999999999993	-1051696618	28.692556844886422	2.980633855245E9	-4.032330473245E9	85.79562278396777	4.032330473245E9	-3983699.3106060605	3983699.3106060605	4.1896430920933255E15
 true	79.553	-79.553	-0.33034580136836733	-401322621137	-4.01322621147175E11	7.9255373737244976E16	34.727455139160156	-69.3780014038086	4856.6352637899645	7.9254972414623824E16	-2130544867	2.30133924842409523E18	69.3780014038086	3.456813247089758E-17	2.0387240975807185E-18	69.3780014038086	2182477964777	34.654968050508266	2.959326820263E9	2.179518637956737E12	9461.197516216069	-2.179518637956737E12	4.592756659884259E8	-4.592756659884259E8	1.002359020778021E21
+PREHOOK: query: -- These tests verify that COUNT on empty or null columns works correctly.
+create table test_count(i int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_count
+POSTHOOK: query: -- These tests verify that COUNT on empty or null columns works correctly.
+create table test_count(i int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_count
+PREHOOK: query: explain
+select count(*) from test_count
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from test_count
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_count
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from test_count
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_count
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_count
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_count
+#### A masked pattern was here ####
+0
+PREHOOK: query: explain
+select count(i) from test_count
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(i) from test_count
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_count
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: i (type: int)
+                    outputColumnNames: i
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(i)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(i) from test_count
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_count
+#### A masked pattern was here ####
+POSTHOOK: query: select count(i) from test_count
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_count
+#### A masked pattern was here ####
+0
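Both results above come out as 0 rather than NULL. A minimal, self-contained Java sketch of that semantic follows (illustrative only, not Hive code; the class and method names are invented): COUNT(*) counts physical rows and COUNT(col) counts only non-null values, so each returns 0L on an empty table.

    import java.util.Collections;
    import java.util.List;

    public class EmptyTableCountSketch {
        // COUNT(*): counts every row, regardless of null values.
        static long countStar(List<Integer> rows) {
            return rows.size();
        }
        // COUNT(col): counts only rows where the column value is non-null.
        static long countColumn(List<Integer> rows) {
            return rows.stream().filter(v -> v != null).count();
        }
        public static void main(String[] args) {
            List<Integer> empty = Collections.emptyList();
            System.out.println(countStar(empty));   // 0 -- matches "select count(*) from test_count"
            System.out.println(countColumn(empty)); // 0 -- matches "select count(i) from test_count"
        }
    }
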
+PREHOOK: query: create table alltypesnull like alltypesorc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypesnull
+POSTHOOK: query: create table alltypesnull like alltypesorc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypesnull
+PREHOOK: query: alter table alltypesnull set fileformat textfile
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@alltypesnull
+PREHOOK: Output: default@alltypesnull
+POSTHOOK: query: alter table alltypesnull set fileformat textfile
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@alltypesnull
+POSTHOOK: Output: default@alltypesnull
+PREHOOK: query: insert into table alltypesnull select null, null, null, null, null, null, null, null, null, null, null, null from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@alltypesnull
+POSTHOOK: query: insert into table alltypesnull select null, null, null, null, null, null, null, null, null, null, null, null from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@alltypesnull
+POSTHOOK: Lineage: alltypesnull.cbigint EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.cboolean1 EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.cboolean2 EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.cdouble EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.cfloat EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.cint EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.csmallint EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.cstring1 SIMPLE []
+POSTHOOK: Lineage: alltypesnull.cstring2 SIMPLE []
+POSTHOOK: Lineage: alltypesnull.ctimestamp1 EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.ctimestamp2 EXPRESSION []
+POSTHOOK: Lineage: alltypesnull.ctinyint EXPRESSION []
+PREHOOK: query: create table alltypesnullorc stored as orc as select * from alltypesnull
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesnull
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypesnullorc
+POSTHOOK: query: create table alltypesnullorc stored as orc as select * from alltypesnull
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesnull
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypesnullorc
+PREHOOK: query: explain
+select count(*) from alltypesnullorc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from alltypesnullorc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesnullorc
+                  Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from alltypesnullorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from alltypesnullorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+12288
+PREHOOK: query: explain
+select count(ctinyint) from alltypesnullorc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(ctinyint) from alltypesnullorc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesnullorc
+                  Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: ctinyint
+                    Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(ctinyint)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(ctinyint) from alltypesnullorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(ctinyint) from alltypesnullorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+0
+PREHOOK: query: explain
+select count(cint) from alltypesnullorc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(cint) from alltypesnullorc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesnullorc
+                  Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: cint
+                    Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(cint)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(cint) from alltypesnullorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(cint) from alltypesnullorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+0
+PREHOOK: query: explain
+select count(cfloat) from alltypesnullorc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(cfloat) from alltypesnullorc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesnullorc
+                  Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cfloat (type: float)
+                    outputColumnNames: cfloat
+                    Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(cfloat)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(cfloat) from alltypesnullorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(cfloat) from alltypesnullorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+0
+PREHOOK: query: explain
+select count(cstring1) from alltypesnullorc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(cstring1) from alltypesnullorc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesnullorc
+                  Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cstring1 (type: string)
+                    outputColumnNames: cstring1
+                    Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(cstring1)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(cstring1) from alltypesnullorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(cstring1) from alltypesnullorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+0
+PREHOOK: query: explain
+select count(cboolean1) from alltypesnullorc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(cboolean1) from alltypesnullorc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesnullorc
+                  Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cboolean1 (type: boolean)
+                    outputColumnNames: cboolean1
+                    Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(cboolean1)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(cboolean1) from alltypesnullorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(cboolean1) from alltypesnullorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesnullorc
+#### A masked pattern was here ####
+0
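
The all-null table rounds out the picture: COUNT(*) still reports all 12288 physical rows, while COUNT of any individual column skips the NULLs and yields 0 for every type tested above. A minimal Java sketch of the same arithmetic (illustrative names only, not Hive code):

    import java.util.Collections;
    import java.util.List;

    public class AllNullCountSketch {
        public static void main(String[] args) {
            // 12288 rows in which the column value is always null.
            List<Integer> rows = Collections.nCopies(12288, null);
            System.out.println(rows.size());                                  // 12288, like count(*)
            System.out.println(rows.stream().filter(v -> v != null).count()); // 0, like count(cint)
        }
    }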