You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by go...@apache.org on 2014/10/02 22:58:54 UTC

svn commit: r1629068 [3/11] - in /hive/branches/branch-0.14: data/files/ itests/src/test/resources/ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/ ql/src/test/results/clientpos...

Modified: hive/branches/branch-0.14/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/itests/src/test/resources/testconfiguration.properties?rev=1629068&r1=1629067&r2=1629068&view=diff
==============================================================================
--- hive/branches/branch-0.14/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/branch-0.14/itests/src/test/resources/testconfiguration.properties Thu Oct  2 20:58:53 2014
@@ -118,6 +118,7 @@ minitez.query.files.shared=alter_merge_2
   orc_merge7.q,\
   orc_merge_incompat1.q,\
   orc_merge_incompat2.q,\
+  orc_vectorization_ppd.q,\
   parallel.q,\
   ptf.q,\
   sample1.q,\
@@ -154,26 +155,54 @@ minitez.query.files.shared=alter_merge_2
   update_where_non_partitioned.q,\
   update_where_partitioned.q,\
   update_two_cols.q,\
+  vector_between_in.q,\
   vector_cast_constant.q,\
   vector_char_simple.q,\
   vector_data_types.q,\
   vector_decimal_aggregate.q,\
+  vector_distinct_2.q,\
+  vector_elt.q,\
+  vector_groupby_3.q,\
   vector_left_outer_join.q,\
   vector_mapjoin_reduce.q,\
+  vector_non_string_partition.q,\
+  vector_orderby_5.q,\
   vector_string_concat.q,\
   vector_varchar_simple.q,\
   vectorization_0.q,\
+  vectorization_1.q,\
+  vectorization_10.q,\
+  vectorization_11.q,\
   vectorization_12.q,\
   vectorization_13.q,\
   vectorization_14.q,\
   vectorization_15.q,\
+  vectorization_16.q,\
+  vectorization_2.q,\
+  vectorization_3.q,\
+  vectorization_4.q,\
+  vectorization_5.q,\
+  vectorization_6.q,\
   vectorization_9.q,\
+  vectorization_decimal_date.q,\
+  vectorization_div0.q,\
+  vectorization_nested_udf.q,\
+  vectorization_not.q,\
+  vectorization_part.q,\
   vectorization_part_project.q,\
+  vectorization_pushdown.q,\
   vectorization_short_regress.q,\
+  vectorized_bucketmapjoin1.q,\
+  vectorized_case.q,\
+  vectorized_context.q,\
   vectorized_mapjoin.q,\
+  vectorized_math_funcs.q,\
   vectorized_nested_mapjoin.q,\
+  vectorized_parquet.q,\
   vectorized_ptf.q,\
+  vectorized_rcfile_columnar.q,\
   vectorized_shufflejoin.q,\
+  vectorized_string_funcs.q,\
   vectorized_timestamp_funcs.q,\
   auto_sortmerge_join_1.q,\
   auto_sortmerge_join_10.q,\

Modified: hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java?rev=1629068&r1=1629067&r2=1629068&view=diff
==============================================================================
--- hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java (original)
+++ hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java Thu Oct  2 20:58:53 2014
@@ -653,6 +653,21 @@ public class VectorGroupByOperator exten
   /**
    * Sorted reduce group batch processing mode. Each input VectorizedRowBatch will have the
    * same key.  On endGroup (or close), the intermediate values are flushed.
+   *
+   * We build the output rows one-at-a-time in the output vectorized row batch (outputBatch)
+   * in 2 steps:
+   *
+   *   1) Just after startGroup, we copy the group key to the next position in the output batch,
+   *      but don't increment the size in the batch (yet).  This is done with the copyGroupKey
+   *      method of VectorGroupKeyHelper.  The next position is outputBatch.size
+   *
+   *      We know the same key is used for the whole batch (i.e. repeating) since that is how
+   *      vectorized reduce-shuffle feeds the batches to us.
+   *
+   *   2) Later at endGroup after reduce-shuffle has fed us all the input batches for the group,
+   *      we fill in the aggregation columns in outputBatch at outputBatch.size.  Our method 
+   *      writeGroupRow does this and finally increments outputBatch.size.
+   *
    */
   private class ProcessingModeGroupBatches extends ProcessingModeBase {
 

Modified: hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java?rev=1629068&r1=1629067&r2=1629068&view=diff
==============================================================================
--- hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java (original)
+++ hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java Thu Oct  2 20:58:53 2014
@@ -42,19 +42,38 @@ public class VectorGroupKeyHelper extend
     finishAdding();
   }
 
+  /*
+   * This helper method copies the group keys from one vectorized row batch to another,
+   * but does not increment the outputBatch.size (i.e. the next output position).
+   * 
+   * It was designed for VectorGroupByOperator's sorted reduce group batch processing mode
+   * to copy the group keys at startGroup.
+   */
   public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outputBatch,
           DataOutputBuffer buffer) throws HiveException {
-    // Grab the key at index 0.  We don't care about selected or repeating since all keys in the input batch are the same.
     for(int i = 0; i< longIndices.length; ++i) {
       int keyIndex = longIndices[i];
       LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[keyIndex];
       LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[keyIndex];
+
+      // This vectorized code pattern says: 
+      //    If the input batch has no nulls at all (noNulls is true) OR
+      //    the input row is NOT NULL, copy the value.
+      //
+      //    Otherwise, we have a NULL input value.  The standard way to mark a NULL in the
+      //    output batch is: turn off noNulls indicating there is at least one NULL in the batch
+      //    and mark that row as NULL.
+      //
+      //    When a vectorized row batch is reset, noNulls is set to true and the isNull array
+      //    is zeroed.
+      //
+      // We grab the key at index 0.  We don't care about selected or repeating since all keys
+      // in the input batch are supposed to be the same.
+      //
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0];
-      } else if (inputColumnVector.noNulls ){
-        outputColumnVector.noNulls = false;
-        outputColumnVector.isNull[outputBatch.size] = true;
       } else {
+        outputColumnVector.noNulls = false;
         outputColumnVector.isNull[outputBatch.size] = true;
       }
     }
@@ -64,10 +83,8 @@ public class VectorGroupKeyHelper extend
       DoubleColumnVector outputColumnVector = (DoubleColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0];
-      } else if (inputColumnVector.noNulls ){
-        outputColumnVector.noNulls = false;
-        outputColumnVector.isNull[outputBatch.size] = true;
       } else {
+        outputColumnVector.noNulls = false;
         outputColumnVector.isNull[outputBatch.size] = true;
       }
     }
@@ -85,10 +102,8 @@ public class VectorGroupKeyHelper extend
           throw new IllegalStateException("bad write", ioe);
         }
         outputColumnVector.setRef(outputBatch.size, buffer.getData(), start, length);
-      } else if (inputColumnVector.noNulls ){
-        outputColumnVector.noNulls = false;
-        outputColumnVector.isNull[outputBatch.size] = true;
       } else {
+        outputColumnVector.noNulls = false;
         outputColumnVector.isNull[outputBatch.size] = true;
       }
     }
@@ -98,10 +113,8 @@ public class VectorGroupKeyHelper extend
       DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[keyIndex];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0];
-      } else if (inputColumnVector.noNulls ){
-        outputColumnVector.noNulls = false;
-        outputColumnVector.isNull[outputBatch.size] = true;
       } else {
+        outputColumnVector.noNulls = false;
         outputColumnVector.isNull[outputBatch.size] = true;
       }
     }

Added: hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_distinct_2.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_distinct_2.q?rev=1629068&view=auto
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_distinct_2.q (added)
+++ hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_distinct_2.q Thu Oct  2 20:58:53 2014
@@ -0,0 +1,43 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table vectortab2k(
+            t tinyint,
+            si smallint,
+            i int,
+            b bigint,
+            f float,
+            d double,
+            dc decimal(38,18),
+            bo boolean,
+            s string,
+            s2 string,
+            ts timestamp,
+            ts2 timestamp,
+            dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+            t tinyint,
+            si smallint,
+            i int,
+            b bigint,
+            f float,
+            d double,
+            dc decimal(38,18),
+            bo boolean,
+            s string,
+            s2 string,
+            ts timestamp,
+            ts2 timestamp,
+            dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+explain
+select distinct s, t from vectortab2korc;
+
+select distinct s, t from vectortab2korc;
\ No newline at end of file

Added: hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_groupby_3.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_groupby_3.q?rev=1629068&view=auto
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_groupby_3.q (added)
+++ hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_groupby_3.q Thu Oct  2 20:58:53 2014
@@ -0,0 +1,43 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table vectortab2k(
+            t tinyint,
+            si smallint,
+            i int,
+            b bigint,
+            f float,
+            d double,
+            dc decimal(38,18),
+            bo boolean,
+            s string,
+            s2 string,
+            ts timestamp,
+            ts2 timestamp,
+            dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+            t tinyint,
+            si smallint,
+            i int,
+            b bigint,
+            f float,
+            d double,
+            dc decimal(38,18),
+            bo boolean,
+            s string,
+            s2 string,
+            ts timestamp,
+            ts2 timestamp,
+            dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+explain
+select s, t, max(b) from vectortab2korc group by s, t;
+
+select s, t, max(b) from vectortab2korc group by s, t;
\ No newline at end of file

Added: hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_orderby_5.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_orderby_5.q?rev=1629068&view=auto
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_orderby_5.q (added)
+++ hive/branches/branch-0.14/ql/src/test/queries/clientpositive/vector_orderby_5.q Thu Oct  2 20:58:53 2014
@@ -0,0 +1,43 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table vectortab2k(
+            t tinyint,
+            si smallint,
+            i int,
+            b bigint,
+            f float,
+            d double,
+            dc decimal(38,18),
+            bo boolean,
+            s string,
+            s2 string,
+            ts timestamp,
+            ts2 timestamp,
+            dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+            t tinyint,
+            si smallint,
+            i int,
+            b bigint,
+            f float,
+            d double,
+            dc decimal(38,18),
+            bo boolean,
+            s string,
+            s2 string,
+            ts timestamp,
+            ts2 timestamp,
+            dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+explain
+select bo, max(b) from vectortab2korc group by bo order by bo desc;
+
+select bo, max(b) from vectortab2korc group by bo order by bo desc;
\ No newline at end of file

Added: hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out?rev=1629068&view=auto
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out (added)
+++ hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out Thu Oct  2 20:58:53 2014
@@ -0,0 +1,288 @@
+PREHOOK: query: -- create table with 1000 rows
+create table srcorc(key string, value string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: -- create table with 1000 rows
+create table srcorc(key string, value string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcorc
+PREHOOK: query: insert overwrite table srcorc select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: insert overwrite table srcorc select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@srcorc
+POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table srcorc select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: insert into table srcorc select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@srcorc
+POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively
+create table if not exists vectororc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively
+create table if not exists vectororc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectororc
+PREHOOK: query: -- insert creates separate orc files
+insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: -- insert creates separate orc files
+insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 SIMPLE []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "b", rand(2), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "b", rand(2), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "c", rand(3), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "c", rand(3), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 SIMPLE []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "e", rand(5), "z" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "e", rand(5), "z" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select "apple", "f", rand(6), "z" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select "apple", "f", rand(6), "z" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 SIMPLE []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "g", rand(7), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "g", rand(7), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table
+create table if not exists testorc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testorc
+POSTHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table
+create table if not exists testorc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testorc
+PREHOOK: query: insert overwrite table testorc select * from vectororc order by s2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectororc
+PREHOOK: Output: default@testorc
+POSTHOOK: query: insert overwrite table testorc select * from vectororc order by s2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectororc
+POSTHOOK: Output: default@testorc
+POSTHOOK: Lineage: testorc.d SIMPLE [(vectororc)vectororc.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: testorc.s1 SIMPLE [(vectororc)vectororc.FieldSchema(name:s1, type:string, comment:null), ]
+POSTHOOK: Lineage: testorc.s2 SIMPLE [(vectororc)vectororc.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: testorc.s3 SIMPLE [(vectororc)vectororc.FieldSchema(name:s3, type:string, comment:null), ]
+PREHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s1 is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s1 is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+3000	1505
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s1 is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s1 is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+3000	1505
+PREHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+4000	2006
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+4000	2006
+PREHOOK: query: -- last row group of stripe 1 and first row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s3="z"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- last row group of stripe 1 and first row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s3="z"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000	1011
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s3="z"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s3="z"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000	1011
+PREHOOK: query: -- first row group of stripe 1 and last row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- first row group of stripe 1 and last row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000	1006
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000	1006
+PREHOOK: query: drop table srcorc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: drop table srcorc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@srcorc
+PREHOOK: query: drop table vectororc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@vectororc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: drop table vectororc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@vectororc
+POSTHOOK: Output: default@vectororc
+PREHOOK: query: drop table testorc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testorc
+PREHOOK: Output: default@testorc
+POSTHOOK: query: drop table testorc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testorc
+POSTHOOK: Output: default@testorc

Added: hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_between_in.q.out?rev=1629068&view=auto
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_between_in.q.out (added)
+++ hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_between_in.q.out Thu Oct  2 20:58:53 2014
@@ -0,0 +1,740 @@
+PREHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@decimal_date_test
+POSTHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@decimal_date_test
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: cdate (type: date)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: date)
+                        sort order: +
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: date)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: cdecimal1 (type: decimal(20,10))
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: decimal(20,10))
+                        sort order: +
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: decimal(20,10))
+                outputColumnNames: _col0
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (not (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568)) (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: cdate (type: date)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: date)
+                        sort order: +
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: date)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: cdate (type: date)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: date)
+                        sort order: +
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: date)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: cdecimal1 BETWEEN -20 AND 45.9918918919 (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: cdecimal1 (type: decimal(20,10))
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: decimal(20,10))
+                        sort order: +
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: decimal(20,10))
+                outputColumnNames: _col0
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-07-14
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+1969-10-26
+PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+5996
+PREHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+-3367.6517567568
+-3367.6517567568
+-3367.6517567568
+-3367.6517567568
+-3367.6517567568
+-3367.6517567568
+-3367.6517567568
+881.0135135135
+2365.8945945946
+PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+9165
+PREHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+1969-12-30
+1969-12-30
+1969-12-30
+1969-12-30
+1969-12-30
+1969-12-30
+1969-12-30
+1969-12-30
+1969-12-31
+1969-12-31
+1969-12-31
+1969-12-31
+1969-12-31
+1969-12-31
+1969-12-31
+1969-12-31
+1969-12-31
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-01
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+1970-01-02
+PREHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+1968-04-07
+1968-04-09
+1968-04-10
+1968-04-14
+1968-04-16
+1968-04-16
+1968-04-19
+1968-04-23
+1968-04-25
+1968-04-26
+1968-04-27
+1968-04-27
+1968-04-27
+1968-04-29
+1968-04-29
+1968-04-29
+1968-04-29
+1968-04-30
+1971-09-02
+1971-09-04
+1971-09-06
+1971-09-06
+1971-09-06
+1971-09-09
+1971-09-09
+1971-09-15
+1971-09-17
+1971-09-18
+1971-09-21
+1971-09-21
+1971-09-21
+1971-09-22
+1971-09-22
+1971-09-25
+PREHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+-18.5162162162
+-17.3216216216
+-16.7243243243
+-16.1270270270
+-15.5297297297
+-10.7513513514
+-9.5567567568
+-8.3621621622
+-5.9729729730
+-3.5837837838
+4.1810810811
+4.7783783784
+4.7783783784
+5.3756756757
+5.9729729730
+5.9729729730
+11.3486486486
+11.3486486486
+11.9459459459
+14.9324324324
+19.1135135135
+20.3081081081
+22.1000000000
+24.4891891892
+33.4486486486
+34.6432432432
+40.0189189189
+42.4081081081
+43.0054054054
+44.2000000000
+44.2000000000
+44.7972972973
+45.9918918919
+PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+6172

Modified: hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out?rev=1629068&r1=1629067&r2=1629068&view=diff
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out (original)
+++ hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out Thu Oct  2 20:58:53 2014
@@ -113,7 +113,7 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
-0	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 -3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.445769230769	6967702.86724384584710
 -563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.445769230769	-4651.00654615384590
 762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.9456923076925	6986288.15678999969250