Posted to commits@hive.apache.org by br...@apache.org on 2014/08/10 03:33:55 UTC

svn commit: r1617040 [12/13] - in /hive/branches/spark: ./ ant/src/org/apache/hadoop/hive/ant/ beeline/ beeline/src/java/org/apache/hive/beeline/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/conf/ dat...

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java Sun Aug 10 01:33:50 2014
@@ -48,9 +48,9 @@ public abstract class GenericUDFBasePad 
       throw new UDFArgumentException(udfName + " requires three arguments. Found :"
 	  + arguments.length);
     }
-    converter1 = checkArguments(arguments, 0);
-    converter2 = checkArguments(arguments, 1);
-    converter3 = checkArguments(arguments, 2);
+    converter1 = checkTextArguments(arguments, 0);
+    converter2 = checkIntArguments(arguments, 1);
+    converter3 = checkTextArguments(arguments, 2);
     return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
   }
 
@@ -91,31 +91,39 @@ public abstract class GenericUDFBasePad 
   protected abstract void performOp(byte[] data, byte[] txt, byte[] padTxt, int len, Text str,
       Text pad);
 
-  private Converter checkArguments(ObjectInspector[] arguments, int i)
+  // Convert input arguments to Text, if necessary.
+  private Converter checkTextArguments(ObjectInspector[] arguments, int i)
     throws UDFArgumentException {
     if (arguments[i].getCategory() != ObjectInspector.Category.PRIMITIVE) {
       throw new UDFArgumentTypeException(i + 1, "Only primitive type arguments are accepted but "
-	  + arguments[i].getTypeName() + " is passed. as  arguments");
+      + arguments[i].getTypeName() + " is passed. as  arguments");
+    }
+
+    Converter converter = ObjectInspectorConverters.getConverter((PrimitiveObjectInspector) arguments[i],
+          PrimitiveObjectInspectorFactory.writableStringObjectInspector);
+
+    return converter;
+  }
+
+  private Converter checkIntArguments(ObjectInspector[] arguments, int i)
+    throws UDFArgumentException {
+    if (arguments[i].getCategory() != ObjectInspector.Category.PRIMITIVE) {
+      throw new UDFArgumentTypeException(i + 1, "Only primitive type arguments are accepted but "
+      + arguments[i].getTypeName() + " is passed. as  arguments");
     }
     PrimitiveCategory inputType = ((PrimitiveObjectInspector) arguments[i]).getPrimitiveCategory();
     Converter converter;
     switch (inputType) {
-    case STRING:
-    case CHAR:
-    case VARCHAR:
-      converter = ObjectInspectorConverters.getConverter((PrimitiveObjectInspector) arguments[i],
-	  PrimitiveObjectInspectorFactory.writableStringObjectInspector);
-      break;
     case INT:
     case SHORT:
     case BYTE:
       converter = ObjectInspectorConverters.getConverter((PrimitiveObjectInspector) arguments[i],
-	  PrimitiveObjectInspectorFactory.writableIntObjectInspector);
+      PrimitiveObjectInspectorFactory.writableIntObjectInspector);
       break;
     default:
       throw new UDFArgumentTypeException(i + 1, udfName
-	  + " only takes STRING/CHAR/INT/SHORT/BYTE/VARCHAR types as " + (i + 1) + "-ths argument, got "
-	  + inputType);
+      + " only takes INT/SHORT/BYTE types as " + (i + 1) + "-ths argument, got "
+      + inputType);
     }
     return converter;
   }
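
The text/int converter split above enforces the usual three-argument lpad/rpad contract: the string to pad, the target length, and the pad string. A minimal plain-Java sketch of that padding behavior (illustrative only, independent of the Hive converter code in this commit; assumes a non-empty pad string):

    // SQL-style lpad: pad s on the left with characters cycled from pad until the
    // result is len chars long; truncate when s is already longer than len.
    static String lpad(String s, int len, String pad) {
      if (len <= s.length()) {
        return s.substring(0, len);                 // lpad("hello", 3, "x") -> "hel"
      }
      StringBuilder sb = new StringBuilder(len);
      while (sb.length() < len - s.length()) {
        sb.append(pad.charAt(sb.length() % pad.length())); // cycle the pad string
      }
      return sb.append(s).toString();               // lpad("hi", 5, "xy") -> "xyxhi"
    }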

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatchCtx.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatchCtx.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatchCtx.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatchCtx.java Sun Aug 10 01:33:50 2014
@@ -49,6 +49,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -285,6 +286,15 @@ public class TestVectorizedRowBatchCtx {
             Assert.assertEquals(true, dcv.vector[i] == ((DoubleWritable) writableCol).get());
           }
             break;
+          case BINARY: {
+            BytesColumnVector bcv = (BytesColumnVector) batch.cols[j];
+              BytesWritable colBinary = (BytesWritable) writableCol;
+              BytesWritable batchBinary = (BytesWritable) bcv.getWritableObject(i);
+              byte[] a = colBinary.getBytes();
+              byte[] b = batchBinary.getBytes();
+              Assert.assertEquals(true, a.equals(b));
+          }
+            break;
           case STRING: {
             BytesColumnVector bcv = (BytesColumnVector) batch.cols[j];
             Text colText = (Text) writableCol;
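
One caveat on the BINARY case added above: a.equals(b) on two byte arrays is reference equality in Java, so the assertion only passes when both writables hand back the same buffer; BytesWritable.getBytes() can also return a backing array longer than the valid data. A content-based comparison would look like the following sketch (hypothetical helper, not part of this commit; BytesWritable.equals() itself also compares contents):

    import org.apache.hadoop.io.BytesWritable;

    // Compare only the first getLength() bytes of each writable: byte[].equals()
    // is identity, and getBytes() may return a padded backing buffer.
    static boolean sameBytes(BytesWritable x, BytesWritable y) {
      if (x.getLength() != y.getLength()) {
        return false;
      }
      for (int k = 0; k < x.getLength(); k++) {
        if (x.getBytes()[k] != y.getBytes()[k]) {
          return false;
        }
      }
      return true;
    }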

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java Sun Aug 10 01:33:50 2014
@@ -107,7 +107,7 @@ public class TestVectorizer {
     gbyOp.setConf(desc);
 
     Vectorizer v = new Vectorizer();
-    Assert.assertTrue(v.validateOperator(gbyOp));
+    Assert.assertTrue(v.validateMapWorkOperator(gbyOp));
     VectorGroupByOperator vectorOp = (VectorGroupByOperator) v.vectorizeOperator(gbyOp, vContext);
     Assert.assertEquals(VectorUDAFSumLong.class, vectorOp.getAggregators()[0].getClass());
     VectorUDAFSumLong udaf = (VectorUDAFSumLong) vectorOp.getAggregators()[0];
@@ -187,7 +187,7 @@ public class TestVectorizer {
     mop.setConf(mjdesc);
  
     Vectorizer vectorizer = new Vectorizer();
-    Assert.assertTrue(vectorizer.validateOperator(mop));
+    Assert.assertTrue(vectorizer.validateMapWorkOperator(mop));
   }
 
   
@@ -203,6 +203,6 @@ public class TestVectorizer {
       mop.setConf(mjdesc);
     
       Vectorizer vectorizer = new Vectorizer();
-      Assert.assertTrue(vectorizer.validateOperator(mop)); 
+      Assert.assertTrue(vectorizer.validateMapWorkOperator(mop)); 
   }
 }

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_filter.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_filter.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_filter.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_filter.q Sun Aug 10 01:33:50 2014
@@ -15,76 +15,76 @@ load data local inpath '../../data/files
 insert overwrite table loc_orc select * from loc_staging;
 
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- column stats are not COMPLETE, so stats are not updated
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc where state='OH';
+explain select * from loc_orc where state='OH';
 
 analyze table loc_orc compute statistics for columns state,locid,zip,year;
 
 -- state column has 5 distincts. numRows/countDistincts
 -- numRows: 1 rawDataSize: 102
-explain extended select * from loc_orc where state='OH';
+explain select * from loc_orc where state='OH';
 
 -- not equals comparison shouldn't affect number of rows
 -- numRows: 8 rawDataSize: 804
-explain extended select * from loc_orc where state!='OH';
-explain extended select * from loc_orc where state<>'OH';
+explain select * from loc_orc where state!='OH';
+explain select * from loc_orc where state<>'OH';
 
 -- nulls are treated as constant equality comparison
 -- numRows: 1 rawDataSize: 102
-explain extended select * from loc_orc where zip is null;
+explain select * from loc_orc where zip is null;
 -- numRows: 1 rawDataSize: 102
-explain extended select * from loc_orc where !(zip is not null);
+explain select * from loc_orc where !(zip is not null);
 
 -- not nulls are treated as inverse of nulls
 -- numRows: 7 rawDataSize: 702
-explain extended select * from loc_orc where zip is not null;
+explain select * from loc_orc where zip is not null;
 -- numRows: 7 rawDataSize: 702
-explain extended select * from loc_orc where !(zip is null);
+explain select * from loc_orc where !(zip is null);
 
 -- NOT evaluation. true will pass all rows, false will not pass any rows
 -- numRows: 8 rawDataSize: 804
-explain extended select * from loc_orc where !false;
+explain select * from loc_orc where !false;
 -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc where !true;
+explain select * from loc_orc where !true;
 
 -- Constant evaluation. true will pass all rows, false will not pass any rows
 -- numRows: 8 rawDataSize: 804
-explain extended select * from loc_orc where true;
+explain select * from loc_orc where true;
 -- numRows: 8 rawDataSize: 804
-explain extended select * from loc_orc where 'foo';
+explain select * from loc_orc where 'foo';
 -- numRows: 8 rawDataSize: 804
-explain extended select * from loc_orc where true = true;
+explain select * from loc_orc where true = true;
 -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc where false = true;
+explain select * from loc_orc where false = true;
 -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc where 'foo' = 'bar';
+explain select * from loc_orc where 'foo' = 'bar';
 -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc where false;
+explain select * from loc_orc where false;
 
 -- OR evaluation. 1 row for OH and 1 row for CA
 -- numRows: 2 rawDataSize: 204
-explain extended select * from loc_orc where state='OH' or state='CA';
+explain select * from loc_orc where state='OH' or state='CA';
 
 -- AND evaluation. cascadingly apply rules. 8/2 = 4/2 = 2
 -- numRows: 2 rawDataSize: 204
-explain extended select * from loc_orc where year=2001 and year is null;
+explain select * from loc_orc where year=2001 and year is null;
 -- numRows: 1 rawDataSize: 102
-explain extended select * from loc_orc where year=2001 and state='OH' and state='FL';
+explain select * from loc_orc where year=2001 and state='OH' and state='FL';
 
 -- AND and OR together. left expr will yield 1 row and right will yield 1 row
 -- numRows: 3 rawDataSize: 306
-explain extended select * from loc_orc where (year=2001 and year is null) or (state='CA');
+explain select * from loc_orc where (year=2001 and year is null) or (state='CA');
 
 -- AND and OR together. left expr will yield 8 rows and right will yield 1 row
 -- numRows: 1 rawDataSize: 102
-explain extended select * from loc_orc where (year=2001 or year is null) and (state='CA');
+explain select * from loc_orc where (year=2001 or year is null) and (state='CA');
 
 -- all inequality conditions rows/3 is the rules
 -- numRows: 2 rawDataSize: 204
-explain extended select * from loc_orc where locid < 30;
-explain extended select * from loc_orc where locid > 30;
-explain extended select * from loc_orc where locid <= 30;
-explain extended select * from loc_orc where locid >= 30;
+explain select * from loc_orc where locid < 30;
+explain select * from loc_orc where locid > 30;
+explain select * from loc_orc where locid <= 30;
+explain select * from loc_orc where locid >= 30;
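
The expected numRows annotations in this file follow the selectivity rules spelled out in the comments: an equality predicate divides by the column's distinct-value count, an inequality keeps rows/3, OR adds the branch estimates, and AND halves the estimate per extra conjunct. Worked out with this test's numbers (8 rows, state has 5 distinct values; plain-Java arithmetic as a sketch of the stated rules, not Hive's annotation code):

    long rows = 8, ndvState = 5;
    long eq  = rows / ndvState;   // state='OH'                 -> 1
    long lt  = rows / 3;          // locid < 30                 -> 2
    long or  = eq + eq;           // state='OH' or state='CA'   -> 2
    long and = rows / 2 / 2;      // two conjuncts: 8/2 = 4/2   -> 2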

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q Sun Aug 10 01:33:50 2014
@@ -15,14 +15,14 @@ load data local inpath '../../data/files
 insert overwrite table loc_orc select * from loc_staging;
 
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- partial column stats
 analyze table loc_orc compute statistics for columns state;
 
 -- inner group by: map - numRows: 8 reduce - numRows: 4
 -- outer group by: map - numRows: 4 reduce numRows: 2
-explain extended select a, c, min(b)
+explain select a, c, min(b)
 from ( select state as a, locid as b, count(*) as c
        from loc_orc
        group by state,locid
@@ -34,36 +34,36 @@ analyze table loc_orc compute statistics
 -- only one distinct value in year column + 1 NULL value
 -- map-side GBY: numRows: 8 (map-side will not do any reduction)
 -- reduce-side GBY: numRows: 2
-explain extended select year from loc_orc group by year;
+explain select year from loc_orc group by year;
 
 -- map-side GBY: numRows: 8
 -- reduce-side GBY: numRows: 4
-explain extended select state,locid from loc_orc group by state,locid;
+explain select state,locid from loc_orc group by state,locid;
 
 -- map-side GBY numRows: 32 reduce-side GBY numRows: 16
-explain extended select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc group by state,locid with cube;
 
 -- map-side GBY numRows: 24 reduce-side GBY numRows: 12
-explain extended select state,locid from loc_orc group by state,locid with rollup;
+explain select state,locid from loc_orc group by state,locid with rollup;
 
 -- map-side GBY numRows: 8 reduce-side GBY numRows: 4
-explain extended select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc group by state,locid grouping sets((state));
 
 -- map-side GBY numRows: 16 reduce-side GBY numRows: 8
-explain extended select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
 
 -- map-side GBY numRows: 24 reduce-side GBY numRows: 12
-explain extended select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
 
 -- map-side GBY numRows: 32 reduce-side GBY numRows: 16
-explain extended select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
 
 set hive.stats.map.parallelism=10;
 
 -- map-side GBY: numRows: 80 (map-side will not do any reduction)
 -- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
-explain extended select year from loc_orc group by year;
+explain select year from loc_orc group by year;
 
 -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
-explain extended select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc group by state,locid with cube;
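
Per the comments above, the map-side group-by estimate multiplies input rows by the number of grouping sets (a cube over two columns expands to 4 sets, a rollup to 3), while the reduce side is capped by the distinct-value product: numRows = min(mapRows/2, product of column NDVs). The last case worked out as a sketch (plain-Java arithmetic on this test's numbers):

    long input = 8, parallelism = 10;                // hive.stats.map.parallelism=10
    long mapCube = input * parallelism * 4;          // cube of 2 cols -> 320
    long reduceCube = Math.min(mapCube / 2, 6L * 7); // min(160, 42)   -> 42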
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_join.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_join.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_join.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_join.q Sun Aug 10 01:33:50 2014
@@ -1,81 +1,70 @@
 set hive.stats.fetch.column.stats=true;
+set hive.stats.ndv.error=0.0;
 
-create table if not exists emp_staging (
+create table if not exists emp (
   lastname string,
-  deptid int
+  deptid int,
+  locid int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table if not exists dept_staging (
+create table if not exists dept (
   deptid int,
   deptname string
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table if not exists loc_staging (
+create table if not exists loc (
   state string,
   locid int,
   zip bigint,
   year int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table if not exists emp_orc like emp_staging;
-alter table emp_orc set fileformat orc;
-
-create table if not exists dept_orc like dept_staging;
-alter table dept_orc set fileformat orc;
-
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
-
-LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_staging;
-LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept_staging;
-LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging;
-
-insert overwrite table emp_orc select * from emp_staging;
-insert overwrite table dept_orc select * from dept_staging;
-insert overwrite table loc_orc select * from loc_staging;
-
-analyze table emp_orc compute statistics for columns lastname,deptid;
-analyze table dept_orc compute statistics for columns deptname,deptid;
-analyze table loc_orc compute statistics for columns state,locid,zip,year;
+LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp;
+LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept;
+LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc;
+
+analyze table emp compute statistics;
+analyze table dept compute statistics;
+analyze table loc compute statistics;
+analyze table emp compute statistics for columns lastname,deptid,locid;
+analyze table dept compute statistics for columns deptname,deptid;
+analyze table loc compute statistics for columns state,locid,zip,year;
 
 -- number of rows
--- emp_orc  - 6
--- dept_orc - 4
--- loc_orc  - 8
+-- emp  - 48
+-- dept - 6
+-- loc  - 8
 
 -- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be greater than number of rows)
--- emp_orc.deptid - 3
--- emp_orc.lastname - 7
--- dept_orc.deptid - 6
--- dept_orc.deptname - 5
--- loc_orc.locid - 6
--- loc_orc.state - 7
-
--- Expected output rows: 4
--- Reason: #rows = (6*4)/max(3,6)
-explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid);
-
--- 3 way join
--- Expected output rows: 4
--- Reason: #rows = (6*4*6)/max(3,6)*max(6,3)
-explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid) join emp_orc e1 on (e.deptid = e1.deptid);
-
--- Expected output rows: 5
--- Reason: #rows = (6*4*8)/max(3,6)*max(6,6)
-explain extended select * from emp_orc e join dept_orc d  on (e.deptid = d.deptid) join loc_orc l on (e.deptid = l.locid);
-
--- join keys of different types
--- Expected output rows: 4
--- Reason: #rows = (6*4*8)/max(3,6)*max(6,7)
-explain extended select * from emp_orc e join dept_orc d  on (e.deptid = d.deptid) join loc_orc l on (e.deptid = l.state);
-
--- multi-attribute join
--- Expected output rows: 0
--- Reason: #rows = (6*4)/max(3,6)*max(7,5)
-explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid and e.lastname = d.deptname);
-
--- 3 way and multi-attribute join
--- Expected output rows: 0
--- Reason: #rows = (6*4*8)/max(3,6)*max(7,5)*max(3,6)*max(7,7)
-explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc_orc l on (e.deptid = l.locid and e.lastname = l.state);
+-- emp.deptid - 3
+-- emp.lastname - 6
+-- emp.locid - 7
+-- dept.deptid - 7
+-- dept.deptname - 6
+-- loc.locid - 7
+-- loc.state - 6
+
+-- 2 relations, 1 attribute
+-- Expected output rows: (48*6)/max(3,7) = 41
+explain select * from emp e join dept d on (e.deptid = d.deptid);
+
+-- 2 relations, 2 attributes
+-- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6
+explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname;
+explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname);
+
+-- 2 relations, 3 attributes
+-- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1
+explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname;
+
+-- 3 relations, 1 attribute
+-- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658
+explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid);
+
+-- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47
+explain select * from emp e join dept d  on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid);
+
+-- 3 relations and 2 attribute
+-- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1
+explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state);
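
The join estimates in the rewritten comments all follow one rule: multiply the input cardinalities, then divide, per join attribute, by the product of the largest NDVs among the joined columns (max of two for a two-way join, top2largest for a three-way join). The main cases worked out as a sketch (plain-Java arithmetic on this test's numbers):

    long emp = 48, dept = 6;
    long oneAttr  = (emp * dept) / Math.max(3, 7);                    // 288/7  -> 41
    long twoAttrs = (emp * dept) / (Math.max(3, 7) * Math.max(6, 6)); // 288/42 -> 6
    long threeWay = (emp * dept * emp) / (7L * 3);  // top2largest(3,7,3) = 21  -> 658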
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_limit.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_limit.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_limit.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_limit.q Sun Aug 10 01:33:50 2014
@@ -17,14 +17,14 @@ insert overwrite table loc_orc select * 
 analyze table loc_orc compute statistics for columns state, locid, zip, year;
 
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- numRows: 4 rawDataSize: 396
-explain extended select * from loc_orc limit 4;
+explain select * from loc_orc limit 4;
 
 -- greater than the available number of rows
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc limit 16;
+explain select * from loc_orc limit 16;
 
 -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc limit 0;
+explain select * from loc_orc limit 0;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q Sun Aug 10 01:33:50 2014
@@ -19,67 +19,67 @@ create table if not exists loc_orc (
 ) partitioned by(year string) stored as orc;
 
 -- basicStatState: NONE colStatState: NONE
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 insert overwrite table loc_orc partition(year) select * from loc_staging;
 
 -- stats are disabled. basic stats will report the file size but not raw data size. so initial statistics will be PARTIAL
 
 -- basicStatState: PARTIAL colStatState: NONE
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- partition level analyze statistics for specific parition
 analyze table loc_orc partition(year='2001') compute statistics;
 
 -- basicStatState: PARTIAL colStatState: NONE
-explain extended select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
+explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
 
 -- basicStatState: PARTIAL colStatState: NONE
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year='2001';
+explain select * from loc_orc where year='2001';
 
 -- partition level analyze statistics for all partitions
 analyze table loc_orc partition(year) compute statistics;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
+explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__';
+explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__';
 
 -- both partitions will be pruned
 -- basicStatState: NONE colStatState: NONE
-explain extended select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__';
+explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__';
 
 -- partition level partial column statistics
 analyze table loc_orc partition(year='2001') compute statistics for columns state,locid;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select zip from loc_orc;
+explain select zip from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: PARTIAL
-explain extended select state from loc_orc;
+explain select state from loc_orc;
 
 -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL
 -- basicStatState: COMPLETE colStatState: PARTIAL
-explain extended select state,locid from loc_orc;
+explain select state,locid from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select state,locid from loc_orc where year='2001';
+explain select state,locid from loc_orc where year='2001';
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select state,locid from loc_orc where year!='2001';
+explain select state,locid from loc_orc where year!='2001';
 
 -- basicStatState: COMPLETE colStatState: PARTIAL
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- This is to test filter expression evaluation on partition column
 -- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select locid from loc_orc where locid>0 and year='2001';
-explain extended select locid,year from loc_orc where locid>0 and year='2001';
-explain extended select * from (select locid,year from loc_orc) test where locid>0 and year='2001';
+explain select locid from loc_orc where locid>0 and year='2001';
+explain select locid,year from loc_orc where locid>0 and year='2001';
+explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001';

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_select.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_select.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_select.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_select.q Sun Aug 10 01:33:50 2014
@@ -28,116 +28,116 @@ load data local inpath '../../data/files
 insert overwrite table alltypes_orc select * from alltypes;
 
 -- basicStatState: COMPLETE colStatState: NONE numRows: 2 rawDataSize: 1514
-explain extended select * from alltypes_orc;
+explain select * from alltypes_orc;
 
 -- statistics for complex types are not supported yet
 analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1;
 
 -- numRows: 2 rawDataSize: 1514
-explain extended select * from alltypes_orc;
+explain select * from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 8
-explain extended select bo1 from alltypes_orc;
+explain select bo1 from alltypes_orc;
 
 -- col alias renaming
 -- numRows: 2 rawDataSize: 8
-explain extended select i1 as int1 from alltypes_orc;
+explain select i1 as int1 from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 174
-explain extended select s1 from alltypes_orc;
+explain select s1 from alltypes_orc;
 
 -- column statistics for complex types unsupported and so statistics will not be updated
 -- numRows: 2 rawDataSize: 1514
-explain extended select m1 from alltypes_orc;
+explain select m1 from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 246
-explain extended select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc;
+explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 0
-explain extended select null from alltypes_orc;
+explain select null from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 8
-explain extended select 11 from alltypes_orc;
+explain select 11 from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 16
-explain extended select 11L from alltypes_orc;
+explain select 11L from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 16
-explain extended select 11.0 from alltypes_orc;
+explain select 11.0 from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 178
-explain extended select "hello" from alltypes_orc;
-explain extended select cast("hello" as char(5)) from alltypes_orc;
-explain extended select cast("hello" as varchar(5)) from alltypes_orc;
+explain select "hello" from alltypes_orc;
+explain select cast("hello" as char(5)) from alltypes_orc;
+explain select cast("hello" as varchar(5)) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 96
-explain extended select unbase64("0xe23") from alltypes_orc;
+explain select unbase64("0xe23") from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 16
-explain extended select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc;
+explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 80
-explain extended select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc;
+explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 112
-explain extended select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc;
+explain select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 224
-explain extended select cast("58.174" as DECIMAL) from alltypes_orc;
+explain select cast("58.174" as DECIMAL) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 112
-explain extended select array(1,2,3) from alltypes_orc;
+explain select array(1,2,3) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 1508
-explain extended select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc;
+explain select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 112
-explain extended select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc;
+explain select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc;
 
 -- numRows: 2 rawDataSize: 250
-explain extended select CREATE_UNION(0, "hello") from alltypes_orc;
+explain select CREATE_UNION(0, "hello") from alltypes_orc;
 
 -- COUNT(*) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows
 -- numRows: 1 rawDataSize: 8
-explain extended select count(*) from alltypes_orc;
+explain select count(*) from alltypes_orc;
 
 -- COUNT(1) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows
 -- numRows: 1 rawDataSize: 8
-explain extended select count(1) from alltypes_orc;
+explain select count(1) from alltypes_orc;
 
 -- column statistics for complex column types will be missing. data size will be calculated from available column statistics
 -- numRows: 2 rawDataSize: 254
-explain extended select *,11 from alltypes_orc;
+explain select *,11 from alltypes_orc;
 
 -- subquery selects
 -- inner select - numRows: 2 rawDataSize: 8
 -- outer select - numRows: 2 rawDataSize: 8
-explain extended select i1 from (select i1 from alltypes_orc limit 10) temp;
+explain select i1 from (select i1 from alltypes_orc limit 10) temp;
 
 -- inner select - numRows: 2 rawDataSize: 16
 -- outer select - numRows: 2 rawDataSize: 8
-explain extended select i1 from (select i1,11 from alltypes_orc limit 10) temp;
+explain select i1 from (select i1,11 from alltypes_orc limit 10) temp;
 
 -- inner select - numRows: 2 rawDataSize: 16
 -- outer select - numRows: 2 rawDataSize: 186
-explain extended select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp;
+explain select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp;
 
 -- inner select - numRows: 2 rawDataSize: 24
 -- outer select - numRows: 2 rawDataSize: 16
-explain extended select x from (select i1,11.0 as x from alltypes_orc limit 10) temp;
+explain select x from (select i1,11.0 as x from alltypes_orc limit 10) temp;
 
 -- inner select - numRows: 2 rawDataSize: 104
 -- outer select - numRows: 2 rawDataSize: 186
-explain extended select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp;
+explain select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp;
 
 -- inner select -  numRows: 2 rawDataSize: 186
 -- middle select - numRows: 2 rawDataSize: 178
 -- outer select -  numRows: 2 rawDataSize: 194
-explain extended select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2;
+explain select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2;
 
 -- This test is for FILTER operator where filter expression is a boolean column
 -- numRows: 2 rawDataSize: 8
-explain extended select bo1 from alltypes_orc where bo1;
+explain select bo1 from alltypes_orc where bo1;
 
 -- numRows: 0 rawDataSize: 0
-explain extended select bo1 from alltypes_orc where !bo1;
+explain select bo1 from alltypes_orc where !bo1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_table.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_table.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_table.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_table.q Sun Aug 10 01:33:50 2014
@@ -10,7 +10,7 @@ create table if not exists emp_orc like 
 alter table emp_orc set fileformat orc;
 
 -- basicStatState: NONE colStatState: NONE
-explain extended select * from emp_orc;
+explain select * from emp_orc;
 
 LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_staging;
 
@@ -19,35 +19,35 @@ insert overwrite table emp_orc select * 
 -- stats are disabled. basic stats will report the file size but not raw data size. so initial statistics will be PARTIAL
 
 -- basicStatState: PARTIAL colStatState: NONE
-explain extended select * from emp_orc;
+explain select * from emp_orc;
 
 -- table level analyze statistics
 analyze table emp_orc compute statistics;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from emp_orc;
+explain select * from emp_orc;
 
 -- column level partial statistics
 analyze table emp_orc compute statistics for columns deptid;
 
 -- basicStatState: COMPLETE colStatState: PARTIAL
-explain extended select * from emp_orc;
+explain select * from emp_orc;
 
 -- all selected columns have statistics
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select deptid from emp_orc;
+explain select deptid from emp_orc;
 
 -- column level complete statistics
 analyze table emp_orc compute statistics for columns lastname,deptid;
 
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select * from emp_orc;
+explain select * from emp_orc;
 
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select lastname from emp_orc;
+explain select lastname from emp_orc;
 
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select deptid from emp_orc;
+explain select deptid from emp_orc;
 
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select lastname,deptid from emp_orc;
+explain select lastname,deptid from emp_orc;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_union.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_union.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_union.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_union.q Sun Aug 10 01:33:50 2014
@@ -17,16 +17,16 @@ insert overwrite table loc_orc select * 
 analyze table loc_orc compute statistics for columns state,locid,zip,year;
 
 -- numRows: 8 rawDataSize: 688
-explain extended select state from loc_orc;
+explain select state from loc_orc;
 
 -- numRows: 16 rawDataSize: 1376
-explain extended select * from (select state from loc_orc union all select state from loc_orc) tmp;
+explain select * from (select state from loc_orc union all select state from loc_orc) tmp;
 
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc;
+explain select * from loc_orc;
 
 -- numRows: 16 rawDataSize: 1592
-explain extended select * from (select * from loc_orc union all select * from loc_orc) tmp;
+explain select * from (select * from loc_orc union all select * from loc_orc) tmp;
 
 create database test;
 use test;
@@ -49,7 +49,7 @@ analyze table loc_staging compute statis
 analyze table loc_orc compute statistics for columns state,locid,zip,year;
 
 -- numRows: 16 rawDataSize: 1376
-explain extended select * from (select state from default.loc_orc union all select state from test.loc_orc) temp;
+explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp;
 
 -- numRows: 16 rawDataSize: 1376
-explain extended select * from (select state from test.loc_staging union all select state from test.loc_orc) temp;
+explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/columnstats_partlvl.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/columnstats_partlvl.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/columnstats_partlvl.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/columnstats_partlvl.q Sun Aug 10 01:33:50 2014
@@ -30,4 +30,12 @@ explain 
 analyze table Employee_Part  compute statistics for columns;
 analyze table Employee_Part  compute statistics for columns;
 
+describe formatted Employee_Part.employeeID partition(employeeSalary=2000.0);
+describe formatted Employee_Part.employeeID partition(employeeSalary=4000.0);
+
+set hive.analyze.stmt.collect.partlevel.stats=false;
+explain 
+analyze table Employee_Part  compute statistics for columns;
+analyze table Employee_Part  compute statistics for columns;
+
 describe formatted Employee_Part.employeeID;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/parquet_columnar.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/parquet_columnar.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/parquet_columnar.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/parquet_columnar.q Sun Aug 10 01:33:50 2014
@@ -13,15 +13,16 @@ CREATE TABLE parquet_columnar_access_sta
 
 CREATE TABLE parquet_columnar_access (
     s string,
-    i int,
+    x int,
+    y int,
     f float
   ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_columnar.txt' OVERWRITE INTO TABLE parquet_columnar_access_stage;
 
-INSERT OVERWRITE TABLE parquet_columnar_access SELECT * FROM parquet_columnar_access_stage;
+INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f FROM parquet_columnar_access_stage;
 SELECT * FROM parquet_columnar_access;
 
-ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, i1 int, f1 float);
+ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, x1 int, y1 int, f1 float);
 
 SELECT * FROM parquet_columnar_access;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_14.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_14.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_14.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_14.q Sun Aug 10 01:33:50 2014
@@ -1,4 +1,38 @@
 SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN 
+SELECT   ctimestamp1,
+         cfloat,
+         cstring1,
+         cboolean1,
+         cdouble,
+         (-26.28 + cdouble),
+         (-((-26.28 + cdouble))),
+         STDDEV_SAMP((-((-26.28 + cdouble)))),
+         (cfloat * -26.28),
+         MAX(cfloat),
+         (-(cfloat)),
+         (-(MAX(cfloat))),
+         ((-((-26.28 + cdouble))) / 10.175),
+         STDDEV_POP(cfloat),
+         COUNT(cfloat),
+         (-(((-((-26.28 + cdouble))) / 10.175))),
+         (-1.389 % STDDEV_SAMP((-((-26.28 + cdouble))))),
+         (cfloat - cdouble),
+         VAR_POP(cfloat),
+         (VAR_POP(cfloat) % 10.175),
+         VAR_SAMP(cfloat),
+         (-((cfloat - cdouble)))
+FROM     alltypesorc
+WHERE    (((ctinyint <= cbigint)
+           AND ((cint <= cdouble)
+                OR (ctimestamp2 < ctimestamp1)))
+          AND ((cdouble < ctinyint)
+              AND ((cbigint > -257)
+                  OR (cfloat < cint))))
+GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble
+ORDER BY cstring1, cfloat, cdouble, ctimestamp1;
+
 SELECT   ctimestamp1,
          cfloat,
          cstring1,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_15.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_15.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_15.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_15.q Sun Aug 10 01:33:50 2014
@@ -1,4 +1,36 @@
 SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN 
+SELECT   cfloat,
+         cboolean1,
+         cdouble,
+         cstring1,
+         ctinyint,
+         cint,
+         ctimestamp1,
+         STDDEV_SAMP(cfloat),
+         (-26.28 - cint),
+         MIN(cdouble),
+         (cdouble * 79.553),
+         (33 % cfloat),
+         STDDEV_SAMP(ctinyint),
+         VAR_POP(ctinyint),
+         (-23 % cdouble),
+         (-(ctinyint)),
+         VAR_SAMP(cint),
+         (cint - cfloat),
+         (-23 % ctinyint),
+         (-((-26.28 - cint))),
+         STDDEV_POP(cint)
+FROM     alltypesorc
+WHERE    (((cstring2 LIKE '%ss%')
+           OR (cstring1 LIKE '10%'))
+          OR ((cint >= -75)
+              AND ((ctinyint = csmallint)
+                   AND (cdouble >= -3728))))
+GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1;
+
 SELECT   cfloat,
          cboolean1,
          cdouble,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_16.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_16.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_16.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_16.q Sun Aug 10 01:33:50 2014
@@ -1,4 +1,25 @@
 SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN 
+SELECT   cstring1,
+         cdouble,
+         ctimestamp1,
+         (cdouble - 9763215.5639),
+         (-((cdouble - 9763215.5639))),
+         COUNT(cdouble),
+         STDDEV_SAMP(cdouble),
+         (-(STDDEV_SAMP(cdouble))),
+         (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+         MIN(cdouble),
+         (9763215.5639 / cdouble),
+         (COUNT(cdouble) / -1.389),
+         STDDEV_SAMP(cdouble)
+FROM     alltypesorc
+WHERE    ((cstring2 LIKE '%b%')
+          AND ((cdouble >= -1.389)
+              OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1;
+
 SELECT   cstring1,
          cdouble,
          ctimestamp1,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_9.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_9.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_9.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_9.q Sun Aug 10 01:33:50 2014
@@ -1,4 +1,25 @@
 SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN 
+SELECT   cstring1,
+         cdouble,
+         ctimestamp1,
+         (cdouble - 9763215.5639),
+         (-((cdouble - 9763215.5639))),
+         COUNT(cdouble),
+         STDDEV_SAMP(cdouble),
+         (-(STDDEV_SAMP(cdouble))),
+         (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+         MIN(cdouble),
+         (9763215.5639 / cdouble),
+         (COUNT(cdouble) / -1.389),
+         STDDEV_SAMP(cdouble)
+FROM     alltypesorc
+WHERE    ((cstring2 LIKE '%b%')
+          AND ((cdouble >= -1.389)
+              OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1;
+
 SELECT   cfloat,
          cstring1,
          cint,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q Sun Aug 10 01:33:50 2014
@@ -120,3 +120,6 @@ SELECT 
   datediff(fl_date, date_sub(fl_date, 2)),
   datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) 
 FROM date_udf_flight_orc LIMIT 10;
+
+-- Test extracting the date part of expression that includes time
+SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
\ No newline at end of file

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_public_drop.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_public_drop.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_public_drop.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_public_drop.q.out Sun Aug 10 01:33:50 2014
@@ -1,3 +1,3 @@
 PREHOOK: query: drop role PUBLIC
 PREHOOK: type: DROPROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public/admin role can't be dropped.)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public,admin roles can't be dropped.)

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out Sun Aug 10 01:33:50 2014
@@ -19,4 +19,4 @@ POSTHOOK: type: GRANT_ROLE
 PREHOOK: query: -- this will create a cycle
 grant role role2 to role role1
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot grant role role1 to role2 as role2 already belongs to the role role1. (no cycles allowed)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Error granting role: Cannot grant role role1 to role2 as role2 already belongs to the role role1. (no cycles allowed)

Modified: hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out Sun Aug 10 01:33:50 2014
@@ -45,4 +45,4 @@ POSTHOOK: type: GRANT_ROLE
 PREHOOK: query: -- this will create a cycle in middle of the hierarchy
 grant role role2 to role role4
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Cannot grant role role4 to role2 as role2 already belongs to the role role4. (no cycles allowed)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Error granting role: Cannot grant role role4 to role2 as role2 already belongs to the role role4. (no cycles allowed)

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_filter.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_filter.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_filter.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_limit.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_limit.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_limit.q.out Sun Aug 10 01:33:50 2014
@@ -59,27 +59,11 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc
+explain select * from loc_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc
+explain select * from loc_orc
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            loc_orc
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -91,7 +75,6 @@ STAGE PLANS:
         TableScan
           alias: loc_orc
           Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
-          GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -99,29 +82,11 @@ STAGE PLANS:
             ListSink
 
 PREHOOK: query: -- numRows: 4 rawDataSize: 396
-explain extended select * from loc_orc limit 4
+explain select * from loc_orc limit 4
 PREHOOK: type: QUERY
 POSTHOOK: query: -- numRows: 4 rawDataSize: 396
-explain extended select * from loc_orc limit 4
+explain select * from loc_orc limit 4
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            loc_orc
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_LIMIT
-         4
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -133,7 +98,6 @@ STAGE PLANS:
         TableScan
           alias: loc_orc
           Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
-          GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -145,30 +109,12 @@ STAGE PLANS:
 
 PREHOOK: query: -- greater than the available number of rows
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc limit 16
+explain select * from loc_orc limit 16
 PREHOOK: type: QUERY
 POSTHOOK: query: -- greater than the available number of rows
 -- numRows: 8 rawDataSize: 796
-explain extended select * from loc_orc limit 16
+explain select * from loc_orc limit 16
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            loc_orc
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_LIMIT
-         16
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -180,7 +126,6 @@ STAGE PLANS:
         TableScan
           alias: loc_orc
           Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
-          GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
@@ -191,29 +136,11 @@ STAGE PLANS:
               ListSink
 
 PREHOOK: query: -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc limit 0
+explain select * from loc_orc limit 0
 PREHOOK: type: QUERY
 POSTHOOK: query: -- numRows: 0 rawDataSize: 0
-explain extended select * from loc_orc limit 0
+explain select * from loc_orc limit 0
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            loc_orc
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_LIMIT
-         0
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -225,7 +152,6 @@ STAGE PLANS:
         TableScan
           alias: loc_orc
           Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
-          GatherStats: false
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3

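Note on the annotate_stats_limit hunks above: the test now runs plain "explain" instead of "explain extended", which drops the ABSTRACT SYNTAX TREE and "GatherStats: false" blocks from the golden output; the statistics assertions themselves are unchanged. Restated as a sketch, with the expected annotations taken from the test's own inline comments (proportional scaling of 796 bytes over 4 of 8 rows would give 398; the annotated 396 suggests the per-row size is recomputed from column statistics and rounded, which is an inference, not something stated in the patch):

    -- full scan:                 numRows 8, rawDataSize 796
    explain select * from loc_orc;
    -- limit under the row count: numRows 4, rawDataSize 396
    explain select * from loc_orc limit 4;
    -- limit over the row count:  capped at numRows 8, rawDataSize 796
    explain select * from loc_orc limit 16;
    -- limit 0:                   numRows 0, rawDataSize 0
    explain select * from loc_orc limit 0;
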
Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_union.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_union.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_union.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_union.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/columnstats_partlvl.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/columnstats_partlvl.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/columnstats_partlvl.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out Sun Aug 10 01:33:50 2014
@@ -671,12 +671,12 @@ STAGE PLANS:
                 keys: ds (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator

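Note on the combine2 hunk (the same substitution recurs in input24, input25, and nullgroup3 below): an operator left with no usable basic statistics is now annotated "Num rows: 0 Data size: 0 Basic stats: NONE" where it previously reported "Basic stats: PARTIAL" (in combine2, with a stale nonzero data size). The reading, inferred from these hunks rather than stated in the patch, is that PARTIAL is reserved for inputs where only some statistics are available. A quick way to observe the NONE state on a fresh table (the table name is illustrative, not part of this commit):

    create table stats_demo (k int);
    explain select count(1) from stats_demo;      -- expect "Basic stats: NONE"
    analyze table stats_demo compute statistics;
    explain select count(1) from stats_demo;      -- expect "Basic stats: COMPLETE"
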
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_11.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_11.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_11.q.out Sun Aug 10 01:33:50 2014
@@ -395,11 +395,11 @@ STAGE PLANS:
                 keys: 1 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
-                  Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col0:0._col0)

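Note on groupby_sort_11: only the estimated data size of the map-side hash group-by changes, from 40 to 120 bytes over the same 10 rows. The arithmetic is consistent with the old estimate covering just the int key (10 rows x 4 bytes = 40) while the new one also counts the bigint value column feeding the distinct count (10 rows x (4 + 8) bytes = 120); that reading is inferred from the numbers, not stated in the patch.
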
Modified: hive/branches/spark/ql/src/test/results/clientpositive/input24.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input24.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input24.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input24.q.out Sun Aug 10 01:33:50 2014
@@ -28,9 +28,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: x
-            Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
               Group By Operator
                 aggregations: count(1)
                 mode: hash

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out Sun Aug 10 01:33:50 2014
@@ -45,14 +45,14 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: x
-            Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: a (type: int), b (type: int), d (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
@@ -109,14 +109,14 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: x
-            Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: a (type: int), b (type: int), d (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE

Modified: hive/branches/spark/ql/src/test/results/clientpositive/metadataonly1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/metadataonly1.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/metadataonly1.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/metadataonly1.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/nullgroup3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/nullgroup3.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/nullgroup3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/nullgroup3.q.out Sun Aug 10 01:33:50 2014
@@ -129,9 +129,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tstparttbl2
-            Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
@@ -325,9 +325,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tstparttbl2
-            Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
               Group By Operator
                 aggregations: count(1)
                 mode: hash

Modified: hive/branches/spark/ql/src/test/results/clientpositive/parquet_columnar.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/parquet_columnar.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/parquet_columnar.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/parquet_columnar.q.out Sun Aug 10 01:33:50 2014
@@ -29,14 +29,16 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_columnar_access_stage
 PREHOOK: query: CREATE TABLE parquet_columnar_access (
     s string,
-    i int,
+    x int,
+    y int,
     f float
   ) STORED AS PARQUET
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE parquet_columnar_access (
     s string,
-    i int,
+    x int,
+    y int,
     f float
   ) STORED AS PARQUET
 POSTHOOK: type: CREATETABLE
@@ -50,17 +52,18 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH 
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@parquet_columnar_access_stage
-PREHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT * FROM parquet_columnar_access_stage
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f FROM parquet_columnar_access_stage
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_columnar_access_stage
 PREHOOK: Output: default@parquet_columnar_access
-POSTHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT * FROM parquet_columnar_access_stage
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f FROM parquet_columnar_access_stage
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_columnar_access_stage
 POSTHOOK: Output: default@parquet_columnar_access
 POSTHOOK: Lineage: parquet_columnar_access.f SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: parquet_columnar_access.i SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:i, type:int, comment:null), ]
 POSTHOOK: Lineage: parquet_columnar_access.s SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_columnar_access.x SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_columnar_access.y EXPRESSION [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:i, type:int, comment:null), ]
 PREHOOK: query: SELECT * FROM parquet_columnar_access
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_columnar_access
@@ -69,32 +72,32 @@ POSTHOOK: query: SELECT * FROM parquet_c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_columnar_access
 #### A masked pattern was here ####
-1abc00	1	1.0
-1def01	2	1.1
-1ghi02	3	1.2
-1jkl03	1	1.3
-1mno04	2	1.4
-1pqr05	3	1.0
-1stu06	1	1.1
-1vwx07	2	1.2
-1yza08	3	1.3
-1bcd09	1	1.4
-1efg10	2	1.0
-1hij11	3	1.1
-1klm12	1	1.2
-1nop13	2	1.3
-1qrs14	3	1.4
-1tuv15	1	1.0
-1wxy16	2	1.1
-1zab17	3	1.2
-1cde18	1	1.3
-1fgh19	2	1.4
-1ijk20	3	1.0
-PREHOOK: query: ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, i1 int, f1 float)
+1abc00	1	2	1.0
+1def01	2	3	1.1
+1ghi02	3	4	1.2
+1jkl03	1	2	1.3
+1mno04	2	3	1.4
+1pqr05	3	4	1.0
+1stu06	1	2	1.1
+1vwx07	2	3	1.2
+1yza08	3	4	1.3
+1bcd09	1	2	1.4
+1efg10	2	3	1.0
+1hij11	3	4	1.1
+1klm12	1	2	1.2
+1nop13	2	3	1.3
+1qrs14	3	4	1.4
+1tuv15	1	2	1.0
+1wxy16	2	3	1.1
+1zab17	3	4	1.2
+1cde18	1	2	1.3
+1fgh19	2	3	1.4
+1ijk20	3	4	1.0
+PREHOOK: query: ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, x1 int, y1 int, f1 float)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@parquet_columnar_access
 PREHOOK: Output: default@parquet_columnar_access
-POSTHOOK: query: ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, i1 int, f1 float)
+POSTHOOK: query: ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, x1 int, y1 int, f1 float)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
 POSTHOOK: Input: default@parquet_columnar_access
 POSTHOOK: Output: default@parquet_columnar_access
@@ -106,24 +109,24 @@ POSTHOOK: query: SELECT * FROM parquet_c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_columnar_access
 #### A masked pattern was here ####
-1abc00	1	1.0
-1def01	2	1.1
-1ghi02	3	1.2
-1jkl03	1	1.3
-1mno04	2	1.4
-1pqr05	3	1.0
-1stu06	1	1.1
-1vwx07	2	1.2
-1yza08	3	1.3
-1bcd09	1	1.4
-1efg10	2	1.0
-1hij11	3	1.1
-1klm12	1	1.2
-1nop13	2	1.3
-1qrs14	3	1.4
-1tuv15	1	1.0
-1wxy16	2	1.1
-1zab17	3	1.2
-1cde18	1	1.3
-1fgh19	2	1.4
-1ijk20	3	1.0
+1abc00	1	2	1.0
+1def01	2	3	1.1
+1ghi02	3	4	1.2
+1jkl03	1	2	1.3
+1mno04	2	3	1.4
+1pqr05	3	4	1.0
+1stu06	1	2	1.1
+1vwx07	2	3	1.2
+1yza08	3	4	1.3
+1bcd09	1	2	1.4
+1efg10	2	3	1.0
+1hij11	3	4	1.1
+1klm12	1	2	1.2
+1nop13	2	3	1.3
+1qrs14	3	4	1.4
+1tuv15	1	2	1.0
+1wxy16	2	3	1.1
+1zab17	3	4	1.2
+1cde18	1	2	1.3
+1fgh19	2	3	1.4
+1ijk20	3	4	1.0

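Note on parquet_columnar.q.out: the test now writes four columns instead of three, deriving the new y as (i + 1) from the staging table, which is why y's lineage is EXPRESSION while x's stays SIMPLE. It then renames every column with REPLACE COLUMNS and selects again; the same 21 rows come back under the new names, exercising Parquet column access by position rather than by name. The two key statements, as they appear in the diff:

    ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, x1 int, y1 int, f1 float);
    SELECT * FROM parquet_columnar_access;   -- same 21 rows, now as (s1, x1, y1, f1)
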
Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/metadataonly1.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/tez/metadataonly1.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/tez/metadataonly1.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/union5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/union5.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/union5.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/union5.q.out Sun Aug 10 01:33:50 2014
@@ -86,14 +86,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/union7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/union7.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/union7.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/union7.q.out Sun Aug 10 01:33:50 2014
@@ -90,14 +90,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_15.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_15.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_15.q.out Sun Aug 10 01:33:50 2014
@@ -1,3 +1,140 @@
+PREHOOK: query: EXPLAIN 
+SELECT   cfloat,
+         cboolean1,
+         cdouble,
+         cstring1,
+         ctinyint,
+         cint,
+         ctimestamp1,
+         STDDEV_SAMP(cfloat),
+         (-26.28 - cint),
+         MIN(cdouble),
+         (cdouble * 79.553),
+         (33 % cfloat),
+         STDDEV_SAMP(ctinyint),
+         VAR_POP(ctinyint),
+         (-23 % cdouble),
+         (-(ctinyint)),
+         VAR_SAMP(cint),
+         (cint - cfloat),
+         (-23 % ctinyint),
+         (-((-26.28 - cint))),
+         STDDEV_POP(cint)
+FROM     alltypesorc
+WHERE    (((cstring2 LIKE '%ss%')
+           OR (cstring1 LIKE '10%'))
+          OR ((cint >= -75)
+              AND ((ctinyint = csmallint)
+                   AND (cdouble >= -3728))))
+GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT   cfloat,
+         cboolean1,
+         cdouble,
+         cstring1,
+         ctinyint,
+         cint,
+         ctimestamp1,
+         STDDEV_SAMP(cfloat),
+         (-26.28 - cint),
+         MIN(cdouble),
+         (cdouble * 79.553),
+         (33 % cfloat),
+         STDDEV_SAMP(ctinyint),
+         VAR_POP(ctinyint),
+         (-23 % cdouble),
+         (-(ctinyint)),
+         VAR_SAMP(cint),
+         (cint - cfloat),
+         (-23 % ctinyint),
+         (-((-26.28 - cint))),
+         STDDEV_POP(cint)
+FROM     alltypesorc
+WHERE    (((cstring2 LIKE '%ss%')
+           OR (cstring1 LIKE '10%'))
+          OR ((cint >= -75)
+              AND ((ctinyint = csmallint)
+                   AND (cdouble >= -3728))))
+GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((cstring2 like '%ss%') or (cstring1 like '10%')) or ((cint >= (- 75)) and ((ctinyint = csmallint) and (cdouble >= (- 3728))))) (type: boolean)
+                    Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
+                      outputColumnNames: cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+                      Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: stddev_samp(cfloat), min(cdouble), stddev_samp(ctinyint), var_pop(ctinyint), var_samp(cint), stddev_pop(cint)
+                        keys: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+                        Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                          sort order: +++++++
+                          Map-reduce partition columns: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                          Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
+                keys: KEY._col0 (type: float), KEY._col1 (type: boolean), KEY._col2 (type: double), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int), KEY._col6 (type: timestamp)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+                Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), ((- 26.28) - _col5) (type: double), _col8 (type: double), (_col2 * 79.553) (type: double), (33 % _col0) (type: float), _col9 (type: double), _col10 (type: double), ((- 23) % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (_col5 - _col0) (type: float), ((- 23) % _col4) (type: int), (- ((- 26.28) - _col5)) (type: double), _col12 (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                  Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                    sort order: +++++++
+                    Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: tinyint), _col16 (type: double), _col17 (type: float), _col18 (type: int), _col19 (type: double), _col20 (type: double)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: boolean), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: tinyint), KEY.reducesinkkey5 (type: int), KEY.reducesinkkey6 (type: timestamp), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: tinyint), VALUE._col9 (type: double), VALUE._col10 (type: float), VALUE._col11 (type: int), VALUE._col12 (type: double), VALUE._col13 (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT   cfloat,
          cboolean1,
          cdouble,

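Note on vectorization_15.q.out: the golden file now opens with the full EXPLAIN for the aggregation query, and both the map vertex and the final reducer report "Execution mode: vectorized" (Reducer 2, which merges the partial variance structs, carries no such annotation). A minimal sketch of requesting such a plan, assuming the standard vectorization switch rather than any setting recorded in this commit:

    set hive.vectorized.execution.enabled=true;
    explain select cfloat, stddev_samp(cfloat) from alltypesorc group by cfloat;
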
Modified: hive/branches/spark/ql/src/test/results/clientpositive/udf_explode.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/udf_explode.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/udf_explode.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/udf_explode.q.out Sun Aug 10 01:33:50 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/udtf_explode.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/udtf_explode.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/udtf_explode.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/udtf_explode.q.out Sun Aug 10 01:33:50 2014 differ