Posted to commits@hive.apache.org by br...@apache.org on 2014/08/10 03:33:55 UTC

svn commit: r1617040 [13/13] - in /hive/branches/spark: ./ ant/src/org/apache/hadoop/hive/ant/ beeline/ beeline/src/java/org/apache/hive/beeline/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/conf/ dat...

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union11.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union11.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union11.q.out Sun Aug 10 01:33:50 2014
@@ -73,12 +73,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -92,12 +92,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -111,12 +111,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -124,14 +124,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union14.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union14.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union14.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union14.q.out Sun Aug 10 01:33:50 2014
@@ -75,12 +75,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -94,12 +94,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -107,14 +107,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union15.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union15.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union15.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union15.q.out Sun Aug 10 01:33:50 2014
@@ -71,12 +71,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 480 Basic stats: COMPLETE Column stats: PARTIAL
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 480 Basic stats: COMPLETE Column stats: PARTIAL
                     value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -96,12 +96,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 480 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 480 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
           TableScan
             alias: s3
@@ -121,12 +121,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 480 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 480 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -134,14 +134,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union17.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union17.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union17.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union17.q.out Sun Aug 10 01:33:50 2014
@@ -110,7 +110,7 @@ STAGE PLANS:
             keys: VALUE._col0 (type: string)
             mode: hash
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 30 Data size: 2640 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 30 Data size: 5760 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
               table:
@@ -122,7 +122,7 @@ STAGE PLANS:
             keys: VALUE._col0 (type: string), VALUE._col1 (type: string)
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 30 Data size: 8160 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
               table:
@@ -138,7 +138,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 30 Data size: 2640 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 30 Data size: 5760 Basic stats: COMPLETE Column stats: PARTIAL
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -146,14 +146,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: final
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 15 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 15 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 15 Data size: 1440 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 15 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 15 Data size: 1440 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 15 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -181,7 +181,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 30 Data size: 8160 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: PARTIAL
               value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -189,14 +189,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: final
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 15 Data size: 4080 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 15 Data size: 5640 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 15 Data size: 4200 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 15 Data size: 5640 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 15 Data size: 4200 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 15 Data size: 5640 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union19.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union19.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union19.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union19.q.out Sun Aug 10 01:33:50 2014
@@ -88,12 +88,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 30 Data size: 2640 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 30 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 30 Data size: 2640 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 30 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
                     value expressions: _col1 (type: bigint)
               Select Operator
                 expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string)
@@ -125,12 +125,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 30 Data size: 2640 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 30 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 30 Data size: 2640 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 30 Data size: 2880 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string)
@@ -150,14 +150,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 15 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 15 Data size: 1500 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 15 Data size: 1440 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 15 Data size: 1500 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 15 Data size: 1440 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 15 Data size: 1500 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union20.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union20.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union20.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union20.q.out Sun Aug 10 01:33:50 2014
@@ -130,14 +130,14 @@ STAGE PLANS:
             0 {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {VALUE._col0}
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 18 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+          Statistics: Num rows: 36 Data size: 19584 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 18 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+            Statistics: Num rows: 36 Data size: 9792 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 18 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+              Statistics: Num rows: 36 Data size: 9792 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union21.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union21.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union21.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union21.q.out Sun Aug 10 01:33:50 2014
@@ -58,12 +58,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
           TableScan
             alias: src_thrift
@@ -83,12 +83,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -108,12 +108,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -133,12 +133,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -158,12 +158,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 132 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -171,14 +171,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 66 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+          Statistics: Num rows: 66 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 66 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+            Statistics: Num rows: 66 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 66 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
+              Statistics: Num rows: 66 Data size: 1056 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union5.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union5.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union5.q.out Sun Aug 10 01:33:50 2014
@@ -68,12 +68,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -87,12 +87,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -100,14 +100,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/union7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/union7.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/union7.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/union7.q.out Sun Aug 10 01:33:50 2014
@@ -67,12 +67,12 @@ STAGE PLANS:
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: PARTIAL
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: PARTIAL
                     value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -92,12 +92,12 @@ STAGE PLANS:
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: PARTIAL
                       value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -105,14 +105,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/vectorization_14.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/vectorization_14.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/vectorization_14.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/vectorization_14.q.out Sun Aug 10 01:33:50 2014
@@ -1,3 +1,147 @@
+PREHOOK: query: EXPLAIN 
+SELECT   ctimestamp1,
+         cfloat,
+         cstring1,
+         cboolean1,
+         cdouble,
+         (-26.28 + cdouble),
+         (-((-26.28 + cdouble))),
+         STDDEV_SAMP((-((-26.28 + cdouble)))),
+         (cfloat * -26.28),
+         MAX(cfloat),
+         (-(cfloat)),
+         (-(MAX(cfloat))),
+         ((-((-26.28 + cdouble))) / 10.175),
+         STDDEV_POP(cfloat),
+         COUNT(cfloat),
+         (-(((-((-26.28 + cdouble))) / 10.175))),
+         (-1.389 % STDDEV_SAMP((-((-26.28 + cdouble))))),
+         (cfloat - cdouble),
+         VAR_POP(cfloat),
+         (VAR_POP(cfloat) % 10.175),
+         VAR_SAMP(cfloat),
+         (-((cfloat - cdouble)))
+FROM     alltypesorc
+WHERE    (((ctinyint <= cbigint)
+           AND ((cint <= cdouble)
+                OR (ctimestamp2 < ctimestamp1)))
+          AND ((cdouble < ctinyint)
+              AND ((cbigint > -257)
+                  OR (cfloat < cint))))
+GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble
+ORDER BY cstring1, cfloat, cdouble, ctimestamp1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT   ctimestamp1,
+         cfloat,
+         cstring1,
+         cboolean1,
+         cdouble,
+         (-26.28 + cdouble),
+         (-((-26.28 + cdouble))),
+         STDDEV_SAMP((-((-26.28 + cdouble)))),
+         (cfloat * -26.28),
+         MAX(cfloat),
+         (-(cfloat)),
+         (-(MAX(cfloat))),
+         ((-((-26.28 + cdouble))) / 10.175),
+         STDDEV_POP(cfloat),
+         COUNT(cfloat),
+         (-(((-((-26.28 + cdouble))) / 10.175))),
+         (-1.389 % STDDEV_SAMP((-((-26.28 + cdouble))))),
+         (cfloat - cdouble),
+         VAR_POP(cfloat),
+         (VAR_POP(cfloat) % 10.175),
+         VAR_SAMP(cfloat),
+         (-((cfloat - cdouble)))
+FROM     alltypesorc
+WHERE    (((ctinyint <= cbigint)
+           AND ((cint <= cdouble)
+                OR (ctimestamp2 < ctimestamp1)))
+          AND ((cdouble < ctinyint)
+              AND ((cbigint > -257)
+                  OR (cfloat < cint))))
+GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble
+ORDER BY cstring1, cfloat, cdouble, ctimestamp1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 1779 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((((ctinyint <= cbigint) and ((cint <= cdouble) or (ctimestamp2 < ctimestamp1))) and (cdouble < ctinyint)) and ((cbigint > (- 257)) or (cfloat < cint))) (type: boolean)
+              Statistics: Num rows: 86 Data size: 18236 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double)
+                outputColumnNames: ctimestamp1, cfloat, cstring1, cboolean1, cdouble
+                Statistics: Num rows: 86 Data size: 18236 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: stddev_samp((- ((- 26.28) + cdouble))), max(cfloat), stddev_pop(cfloat), count(cfloat), var_pop(cfloat), var_samp(cfloat)
+                  keys: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  Statistics: Num rows: 86 Data size: 18236 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: timestamp), _col1 (type: float), _col2 (type: string), _col3 (type: boolean), _col4 (type: double)
+                    sort order: +++++
+                    Map-reduce partition columns: _col0 (type: timestamp), _col1 (type: float), _col2 (type: string), _col3 (type: boolean), _col4 (type: double)
+                    Statistics: Num rows: 86 Data size: 18236 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: float), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: bigint), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: stddev_samp(VALUE._col0), max(VALUE._col1), stddev_pop(VALUE._col2), count(VALUE._col3), var_pop(VALUE._col4), var_samp(VALUE._col5)
+          keys: KEY._col0 (type: timestamp), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: boolean), KEY._col4 (type: double)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+          Statistics: Num rows: 43 Data size: 9118 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: timestamp), _col1 (type: float), _col2 (type: string), _col3 (type: boolean), _col4 (type: double), ((- 26.28) + _col4) (type: double), (- ((- 26.28) + _col4)) (type: double), _col5 (type: double), (_col1 * (- 26.28)) (type: double), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- ((- 26.28) + _col4)) / 10.175) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- ((- 26.28) + _col4)) / 10.175)) (type: double), ((- 1.389) % _col5) (type: double), (_col1 - _col4) (type: double), _col9 (type: double), (_col9 % 10.175) (type: double), _col10 (type: double), (- (_col1 - _col4)) (type: double)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
+            Statistics: Num rows: 43 Data size: 9118 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col2 (type: string), _col1 (type: float), _col4 (type: double), _col0 (type: timestamp)
+              sort order: ++++
+              Statistics: Num rows: 43 Data size: 9118 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col3 (type: boolean), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: float), _col10 (type: float), _col11 (type: float), _col12 (type: double), _col13 (type: double), _col14 (type: bigint), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: double), _col21 (type: double)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey3 (type: timestamp), KEY.reducesinkkey1 (type: float), KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean), KEY.reducesinkkey2 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: float), VALUE._col6 (type: float), VALUE._col7 (type: float), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: bigint), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col14 (type: double), VALUE._col15 (type: double), VALUE._col16 (type: double), VALUE._col17 (type: double)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
+          Statistics: Num rows: 43 Data size: 9118 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 43 Data size: 9118 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT   ctimestamp1,
          cfloat,
          cstring1,

Modified: hive/branches/spark/ql/src/test/results/clientpositive/vectorization_15.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/vectorization_15.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/vectorization_15.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/vectorization_15.q.out Sun Aug 10 01:33:50 2014
@@ -1,3 +1,143 @@
+PREHOOK: query: EXPLAIN 
+SELECT   cfloat,
+         cboolean1,
+         cdouble,
+         cstring1,
+         ctinyint,
+         cint,
+         ctimestamp1,
+         STDDEV_SAMP(cfloat),
+         (-26.28 - cint),
+         MIN(cdouble),
+         (cdouble * 79.553),
+         (33 % cfloat),
+         STDDEV_SAMP(ctinyint),
+         VAR_POP(ctinyint),
+         (-23 % cdouble),
+         (-(ctinyint)),
+         VAR_SAMP(cint),
+         (cint - cfloat),
+         (-23 % ctinyint),
+         (-((-26.28 - cint))),
+         STDDEV_POP(cint)
+FROM     alltypesorc
+WHERE    (((cstring2 LIKE '%ss%')
+           OR (cstring1 LIKE '10%'))
+          OR ((cint >= -75)
+              AND ((ctinyint = csmallint)
+                   AND (cdouble >= -3728))))
+GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT   cfloat,
+         cboolean1,
+         cdouble,
+         cstring1,
+         ctinyint,
+         cint,
+         ctimestamp1,
+         STDDEV_SAMP(cfloat),
+         (-26.28 - cint),
+         MIN(cdouble),
+         (cdouble * 79.553),
+         (33 % cfloat),
+         STDDEV_SAMP(ctinyint),
+         VAR_POP(ctinyint),
+         (-23 % cdouble),
+         (-(ctinyint)),
+         VAR_SAMP(cint),
+         (cint - cfloat),
+         (-23 % ctinyint),
+         (-((-26.28 - cint))),
+         STDDEV_POP(cint)
+FROM     alltypesorc
+WHERE    (((cstring2 LIKE '%ss%')
+           OR (cstring1 LIKE '10%'))
+          OR ((cint >= -75)
+              AND ((ctinyint = csmallint)
+                   AND (cdouble >= -3728))))
+GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((cstring2 like '%ss%') or (cstring1 like '10%')) or ((cint >= (- 75)) and ((ctinyint = csmallint) and (cdouble >= (- 3728))))) (type: boolean)
+              Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
+                outputColumnNames: cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+                Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: stddev_samp(cfloat), min(cdouble), stddev_samp(ctinyint), var_pop(ctinyint), var_samp(cint), stddev_pop(cint)
+                  keys: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+                  Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                    sort order: +++++++
+                    Map-reduce partition columns: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                    Statistics: Num rows: 1407 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
+          keys: KEY._col0 (type: float), KEY._col1 (type: boolean), KEY._col2 (type: double), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int), KEY._col6 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+          Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), ((- 26.28) - _col5) (type: double), _col8 (type: double), (_col2 * 79.553) (type: double), (33 % _col0) (type: float), _col9 (type: double), _col10 (type: double), ((- 23) % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (_col5 - _col0) (type: float), ((- 23) % _col4) (type: int), (- ((- 26.28) - _col5)) (type: double), _col12 (type: double)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+            Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+              sort order: +++++++
+              Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: tinyint), _col16 (type: double), _col17 (type: float), _col18 (type: int), _col19 (type: double), _col20 (type: double)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: boolean), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: tinyint), KEY.reducesinkkey5 (type: int), KEY.reducesinkkey6 (type: timestamp), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: tinyint), VALUE._col9 (type: double), VALUE._col10 (type: float), VALUE._col11 (type: int), VALUE._col12 (type: double), VALUE._col13 (type: double)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+          Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 703 Data size: 188484 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT   cfloat,
          cboolean1,
          cdouble,

Modified: hive/branches/spark/ql/src/test/results/clientpositive/vectorization_16.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/vectorization_16.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/vectorization_16.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/vectorization_16.q.out Sun Aug 10 01:33:50 2014
@@ -1,3 +1,99 @@
+PREHOOK: query: EXPLAIN 
+SELECT   cstring1,
+         cdouble,
+         ctimestamp1,
+         (cdouble - 9763215.5639),
+         (-((cdouble - 9763215.5639))),
+         COUNT(cdouble),
+         STDDEV_SAMP(cdouble),
+         (-(STDDEV_SAMP(cdouble))),
+         (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+         MIN(cdouble),
+         (9763215.5639 / cdouble),
+         (COUNT(cdouble) / -1.389),
+         STDDEV_SAMP(cdouble)
+FROM     alltypesorc
+WHERE    ((cstring2 LIKE '%b%')
+          AND ((cdouble >= -1.389)
+              OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT   cstring1,
+         cdouble,
+         ctimestamp1,
+         (cdouble - 9763215.5639),
+         (-((cdouble - 9763215.5639))),
+         COUNT(cdouble),
+         STDDEV_SAMP(cdouble),
+         (-(STDDEV_SAMP(cdouble))),
+         (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+         MIN(cdouble),
+         (9763215.5639 / cdouble),
+         (COUNT(cdouble) / -1.389),
+         STDDEV_SAMP(cdouble)
+FROM     alltypesorc
+WHERE    ((cstring2 LIKE '%b%')
+          AND ((cdouble >= -1.389)
+              OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 1521 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((cstring2 like '%b%') and ((cdouble >= (- 1.389)) or (cstring1 < 'a'))) (type: boolean)
+              Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
+                outputColumnNames: cstring1, cdouble, ctimestamp1
+                Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                  keys: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    sort order: +++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
+          keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          Statistics: Num rows: 253 Data size: 62748 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * _col3) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (_col3 / (- 1.389)) (type: double), _col4 (type: double)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+            Statistics: Num rows: 253 Data size: 62748 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 253 Data size: 62748 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT   cstring1,
          cdouble,
          ctimestamp1,

Modified: hive/branches/spark/ql/src/test/results/clientpositive/vectorization_9.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/vectorization_9.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/vectorization_9.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/vectorization_9.q.out Sun Aug 10 01:33:50 2014
@@ -1,3 +1,99 @@
+PREHOOK: query: EXPLAIN 
+SELECT   cstring1,
+         cdouble,
+         ctimestamp1,
+         (cdouble - 9763215.5639),
+         (-((cdouble - 9763215.5639))),
+         COUNT(cdouble),
+         STDDEV_SAMP(cdouble),
+         (-(STDDEV_SAMP(cdouble))),
+         (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+         MIN(cdouble),
+         (9763215.5639 / cdouble),
+         (COUNT(cdouble) / -1.389),
+         STDDEV_SAMP(cdouble)
+FROM     alltypesorc
+WHERE    ((cstring2 LIKE '%b%')
+          AND ((cdouble >= -1.389)
+              OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT   cstring1,
+         cdouble,
+         ctimestamp1,
+         (cdouble - 9763215.5639),
+         (-((cdouble - 9763215.5639))),
+         COUNT(cdouble),
+         STDDEV_SAMP(cdouble),
+         (-(STDDEV_SAMP(cdouble))),
+         (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+         MIN(cdouble),
+         (9763215.5639 / cdouble),
+         (COUNT(cdouble) / -1.389),
+         STDDEV_SAMP(cdouble)
+FROM     alltypesorc
+WHERE    ((cstring2 LIKE '%b%')
+          AND ((cdouble >= -1.389)
+              OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 1521 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((cstring2 like '%b%') and ((cdouble >= (- 1.389)) or (cstring1 < 'a'))) (type: boolean)
+              Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
+                outputColumnNames: cstring1, cdouble, ctimestamp1
+                Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                  keys: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    sort order: +++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    Statistics: Num rows: 506 Data size: 125497 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
+          keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          Statistics: Num rows: 253 Data size: 62748 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * _col3) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (_col3 / (- 1.389)) (type: double), _col4 (type: double)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+            Statistics: Num rows: 253 Data size: 62748 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 253 Data size: 62748 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT   cfloat,
          cstring1,
          cint,

Modified: hive/branches/spark/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out Sun Aug 10 01:33:50 2014
@@ -959,3 +959,14 @@ POSTHOOK: Input: default@date_udf_flight
 2010-10-20	2010-10-22	2010-10-18	-2	2	4
 2010-10-21	2010-10-23	2010-10-19	-2	2	4
 2010-10-21	2010-10-23	2010-10-19	-2	2	4
+PREHOOK: query: -- Test extracting the date part of an expression that includes time
+SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf_flight_orc
+#### A masked pattern was here ####
+POSTHOOK: query: -- Test extracting the date part of an expression that includes time
+SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf_flight_orc
+#### A masked pattern was here ####
+2009-07-30
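
The new test pins down the semantics being exercised: to_date() applied to a
string that carries a time component returns only the date part. As a plain
Java analogue of that behavior (a sketch for illustration, not Hive's actual
UDF implementation):

    import java.text.SimpleDateFormat;

    public class ToDateSketch {
      public static void main(String[] args) throws Exception {
        SimpleDateFormat in = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        SimpleDateFormat out = new SimpleDateFormat("yyyy-MM-dd");
        // Parse the full timestamp, then keep only the date portion,
        // matching the expected output line above: 2009-07-30
        System.out.println(out.format(in.parse("2009-07-30 04:17:52")));
      }
    }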

Modified: hive/branches/spark/ql/src/test/templates/TestCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/templates/TestCliDriver.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/templates/TestCliDriver.vm (original)
+++ hive/branches/spark/ql/src/test/templates/TestCliDriver.vm Sun Aug 10 01:33:50 2014
@@ -37,9 +37,12 @@ public class $className extends TestCase
 
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
     String hiveConfDir = "$hiveConfDir";
+    String initScript = "$initScript";
+    String cleanupScript = "$cleanupScript";
     try {
       String hadoopVer = "$hadoopVersion";
-      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hiveConfDir, hadoopVer);
+      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
+      hiveConfDir, hadoopVer, initScript, cleanupScript);
 
       // do a one time initialization
       qt.cleanUp();

Modified: hive/branches/spark/ql/src/test/templates/TestCompareCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/templates/TestCompareCliDriver.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/templates/TestCompareCliDriver.vm (original)
+++ hive/branches/spark/ql/src/test/templates/TestCompareCliDriver.vm Sun Aug 10 01:33:50 2014
@@ -38,9 +38,12 @@ public class $className extends TestCase
 
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
     String hiveConfDir = "$hiveConfDir";
+    String initScript = "$initScript";
+    String cleanupScript = "$cleanupScript";
     try {
       String hadoopVer = "$hadoopVersion";
-      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hiveConfDir, hadoopVer);
+      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
+      hiveConfDir, hadoopVer, initScript, cleanupScript);
 
       // do a one time initialization
       qt.cleanUp();

Modified: hive/branches/spark/ql/src/test/templates/TestNegativeCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/templates/TestNegativeCliDriver.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/templates/TestNegativeCliDriver.vm (original)
+++ hive/branches/spark/ql/src/test/templates/TestNegativeCliDriver.vm Sun Aug 10 01:33:50 2014
@@ -35,10 +35,13 @@ public class $className extends TestCase
 
   static {
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+    String initScript = "$initScript";
+    String cleanupScript = "$cleanupScript";
 
     try {
       String hadoopVer = "$hadoopVersion";
-      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer);
+      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer,
+       initScript, cleanupScript);
       // do a one time initialization
       qt.cleanUp();
       qt.createSources();

Modified: hive/branches/spark/ql/src/test/templates/TestParse.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/templates/TestParse.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/templates/TestParse.vm (original)
+++ hive/branches/spark/ql/src/test/templates/TestParse.vm Sun Aug 10 01:33:50 2014
@@ -35,10 +35,13 @@ public class $className extends TestCase
   
   static {
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+    String initScript = "$initScript";
+    String cleanupScript = "$cleanupScript";
 
     try {
       String hadoopVer = "$hadoopVersion";
-      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer);
+      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer,
+       initScript, cleanupScript);
       qt.init(null);
     } catch (Exception e) {
       System.err.println("Exception: " + e.getMessage());

Modified: hive/branches/spark/ql/src/test/templates/TestParseNegative.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/templates/TestParseNegative.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/templates/TestParseNegative.vm (original)
+++ hive/branches/spark/ql/src/test/templates/TestParseNegative.vm Sun Aug 10 01:33:50 2014
@@ -36,10 +36,13 @@ public class $className extends TestCase
   static {
 
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+    String initScript = "$initScript";
+    String cleanupScript = "$cleanupScript";
 
     try {
       String hadoopVer = "$hadoopVersion";
-      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer);
+      qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer,
+       initScript, cleanupScript);
     } catch (Exception e) {
       System.err.println("Exception: " + e.getMessage());
       e.printStackTrace();

Modified: hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java (original)
+++ hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java Sun Aug 10 01:33:50 2014
@@ -20,7 +20,9 @@ package org.apache.hive.service.cli.thri
 
 import java.net.InetSocketAddress;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.cli.CLIService;
 import org.apache.thrift.TProcessorFactory;
@@ -73,8 +75,10 @@ public class ThriftBinaryCLIService exte
           throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname +
               " Not configured for SSL connection");
         }
+        String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf,
+            HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname);
         serverSocket = HiveAuthFactory.getServerSSLSocket(hiveHost, portNum,
-            keyStorePath, hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD));
+            keyStorePath, keyStorePassword);
       }
       TThreadPoolServer.Args sargs = new TThreadPoolServer.Args(serverSocket)
       .processorFactory(processorFactory)
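
The hunk above stops reading the SSL keystore password directly from the conf
and routes it through the new HadoopShims.getPassword() call added further
down in this commit. On Hadoop versions that provide
Configuration.getPassword(), this lets the password live in a credential
provider instead of plain text in hive-site.xml. A minimal sketch of that
resolution path, assuming a JCEKS provider created beforehand with the
hadoop credential CLI (the alias and file path here are illustrative only):

    import org.apache.hadoop.conf.Configuration;

    public class PasswordLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed setup, not part of this commit:
        //   hadoop credential create hive.server2.keystore.password \
        //       -provider jceks://file/tmp/hive.jceks
        conf.set("hadoop.security.credential.provider.path",
            "jceks://file/tmp/hive.jceks");
        // Consults the provider first, then falls back to the plain conf
        // value; the shim methods below mirror exactly this fallback.
        char[] pw = conf.getPassword("hive.server2.keystore.password");
        System.out.println(pw == null ? "not set" : "resolved");
      }
    }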

Modified: hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java (original)
+++ hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java Sun Aug 10 01:33:50 2014
@@ -594,7 +594,7 @@ public abstract class ThriftCLIService e
 
   private boolean isKerberosAuthMode() {
     return cliService.getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION)
-        .equals(HiveAuthFactory.AuthTypes.KERBEROS.toString());
+        .equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString());
   }
 
 }
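
The one-line change above makes the Kerberos detection tolerant of however the
operator cased hive.server2.authentication. A small, self-contained
illustration of the difference (the literal value is hypothetical):

    public class AuthTypeCheck {
      public static void main(String[] args) {
        String configured = "kerberos"; // as it might appear in hive-site.xml
        // Old check: exact match only, so this casing skipped Kerberos setup.
        System.out.println(configured.equals("KERBEROS"));           // false
        // New check: any casing of the value enables the Kerberos path.
        System.out.println(configured.equalsIgnoreCase("KERBEROS")); // true
      }
    }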

Modified: hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java (original)
+++ hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java Sun Aug 10 01:33:50 2014
@@ -20,6 +20,7 @@ package org.apache.hive.service.cli.thri
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell;
 import org.apache.hive.service.auth.HiveAuthFactory;
@@ -83,7 +84,8 @@ public class ThriftHttpCLIService extend
 
       if (useSsl) {
         String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim();
-        String keyStorePassword = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD);
+        String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf,
+            HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname);
         if (keyStorePath.isEmpty()) {
           throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname +
               " Not configured for SSL connection");

Modified: hive/branches/spark/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java (original)
+++ hive/branches/spark/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java Sun Aug 10 01:33:50 2014
@@ -25,6 +25,7 @@ import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.security.AccessControlException;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -43,6 +44,7 @@ import javax.security.auth.login.LoginEx
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.DefaultFileAccess;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -52,6 +54,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.ProxyFileSystem;
 import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
 import org.apache.hadoop.io.LongWritable;
@@ -880,4 +883,16 @@ public class Hadoop20Shims implements Ha
     LOG.debug(ArrayUtils.toString(command));
     shell.run(command);
   }
+
+  @Override
+  public void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
+      throws IOException, AccessControlException, Exception {
+    DefaultFileAccess.checkFileAccess(fs, stat, action);
+  }
+
+  @Override
+  public String getPassword(Configuration conf, String name) {
+    // No password API; just retrieve the value from conf.
+    return conf.get(name);
+  }
 }

Modified: hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java (original)
+++ hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java Sun Aug 10 01:33:50 2014
@@ -507,4 +507,10 @@ public class Hadoop20SShims extends Hado
   public void getMergedCredentials(JobConf jobConf) throws IOException {
     throw new IOException("Merging of credentials not supported in this version of hadoop");
   }
+
+  @Override
+  public String getPassword(Configuration conf, String name) {
+    // No password API; just retrieve the value from conf.
+    return conf.get(name);
+  }
 }

Modified: hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (original)
+++ hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java Sun Aug 10 01:33:50 2014
@@ -19,10 +19,15 @@ package org.apache.hadoop.hive.shims;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
+import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
@@ -32,6 +37,7 @@ import org.apache.commons.lang.ArrayUtil
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.DefaultFileAccess;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -651,6 +657,34 @@ public class Hadoop23Shims extends Hadoo
         }
       };
     }
+
+    /**
+     * Proxy file system also needs to override the access() method behavior.
+     * Cannot add Override annotation since FileSystem.access() may not exist in
+     * the version of Hadoop used to build Hive.
+     */
+    public void access(Path path, FsAction action) throws AccessControlException,
+        FileNotFoundException, IOException {
+      Path underlyingFsPath = swizzleParamPath(path);
+      FileStatus underlyingFsStatus = fs.getFileStatus(underlyingFsPath);
+      try {
+        if (accessMethod != null) {
+          accessMethod.invoke(fs, underlyingFsPath, action);
+        } else {
+          // If the FS has no access() method, fall back to Hive's own
+          // permission check against the file status.
+          DefaultFileAccess.checkFileAccess(fs, underlyingFsStatus, action);
+        }
+      } catch (AccessControlException err) {
+        throw err;
+      } catch (FileNotFoundException err) {
+        throw err;
+      } catch (IOException err) {
+        throw err;
+      } catch (Exception err) {
+        throw new RuntimeException(err.getMessage(), err);
+      }
+    }
   }
 
   @Override
@@ -709,4 +743,77 @@ public class Hadoop23Shims extends Hadoo
   public void getMergedCredentials(JobConf jobConf) throws IOException {
     jobConf.getCredentials().mergeAll(UserGroupInformation.getCurrentUser().getCredentials());
   }
+
+  protected static final Method accessMethod;
+  protected static final Method getPasswordMethod;
+
+  static {
+    Method m = null;
+    try {
+      m = FileSystem.class.getMethod("access", Path.class, FsAction.class);
+    } catch (NoSuchMethodException err) {
+      // This version of Hadoop does not support FileSystem.access().
+    }
+    accessMethod = m;
+
+    try {
+      m = Configuration.class.getMethod("getPassword", String.class);
+    } catch (NoSuchMethodException err) {
+      // This version of Hadoop does not support getPassword(); fall back to the conf value.
+      m = null;
+    }
+    getPasswordMethod = m;
+  }
+
+  @Override
+  public void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
+      throws IOException, AccessControlException, Exception {
+    try {
+      if (accessMethod == null) {
+        // Have to rely on Hive implementation of filesystem permission checks.
+        DefaultFileAccess.checkFileAccess(fs, stat, action);
+      } else {
+        accessMethod.invoke(fs, stat.getPath(), action);
+      }
+    } catch (Exception err) {
+      throw wrapAccessException(err);
+    }
+  }
+
+  /**
+   * If there is an AccessControlException buried somewhere in the chain of failures, wrap the
+   * original exception in an AccessControlException. Otherwise just return the original exception.
+   */
+  private static Exception wrapAccessException(Exception err) {
+    final int maxDepth = 20;
+    Throwable curErr = err;
+    for (int idx = 0; curErr != null && idx < maxDepth; ++idx) {
+      if (curErr instanceof org.apache.hadoop.security.AccessControlException
+          || curErr instanceof org.apache.hadoop.fs.permission.AccessControlException) {
+        Exception newErr = new AccessControlException(curErr.getMessage());
+        newErr.initCause(err);
+        return newErr;
+      }
+      curErr = curErr.getCause();
+    }
+    return err;
+  }
+
+  @Override
+  public String getPassword(Configuration conf, String name) throws IOException {
+    if (getPasswordMethod == null) {
+      // Just retrieve value from conf
+      return conf.get(name);
+    } else {
+      try {
+        char[] pw = (char[]) getPasswordMethod.invoke(conf, name);
+        if (pw == null) {
+          return null;
+        }
+        return new String(pw);
+      } catch (Exception err) {
+        throw new IOException(err.getMessage(), err);
+      }
+    }
+  }
 }
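
Hadoop23Shims probes for both optional APIs exactly once, in a static
initializer, and caches the resulting Method handles; call sites then branch
on null rather than paying a reflective lookup on every invocation. A
condensed sketch of that pattern, reusing the Configuration.getPassword()
probe from the hunk above:

    import java.lang.reflect.Method;

    import org.apache.hadoop.conf.Configuration;

    public class OptionalApiSketch {
      // Probe once: null means this Hadoop build lacks the API.
      private static final Method GET_PASSWORD;
      static {
        Method m = null;
        try {
          m = Configuration.class.getMethod("getPassword", String.class);
        } catch (NoSuchMethodException e) {
          // Older Hadoop release: the credential-provider API is absent.
        }
        GET_PASSWORD = m;
      }

      static String fetch(Configuration conf, String name) throws Exception {
        if (GET_PASSWORD == null) {
          return conf.get(name); // plain-config fallback, as in Hadoop20Shims
        }
        char[] pw = (char[]) GET_PASSWORD.invoke(conf, name);
        return pw == null ? null : new String(pw);
      }
    }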

Modified: hive/branches/spark/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java (original)
+++ hive/branches/spark/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java Sun Aug 10 01:33:50 2014
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -32,14 +33,19 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
+import javax.security.auth.login.LoginException;
+
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DefaultFileAccess;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
 import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier;
 import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
@@ -663,4 +669,10 @@ public abstract class HadoopShimsSecure 
     Collections.addAll(dedup, locations);
     return dedup.toArray(new String[dedup.size()]);
   }
+
+  @Override
+  public void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
+      throws IOException, AccessControlException, Exception {
+    DefaultFileAccess.checkFileAccess(fs, stat, action);
+  }
 }

Modified: hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java (original)
+++ hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java Sun Aug 10 01:33:50 2014
@@ -25,6 +25,7 @@ import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
+import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Comparator;
 import java.util.List;
@@ -42,6 +43,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.InputSplit;
@@ -669,4 +671,27 @@ public interface HadoopShims {
 
   public void getMergedCredentials(JobConf jobConf) throws IOException;
 
+  /**
+   * Check whether the configured UGI has access to the path for the given file system action.
+   * The method returns normally if the action is permitted; an AccessControlException is
+   * thrown if the user does not have access to perform the action. Other exceptions may
+   * be thrown for non-access-related errors.
+   * @param fs
+   * @param status
+   * @param action
+   * @throws IOException
+   * @throws AccessControlException
+   * @throws Exception
+   */
+  public void checkFileAccess(FileSystem fs, FileStatus status, FsAction action)
+      throws IOException, AccessControlException, Exception;
+
+  /**
+   * Use the Hadoop password API (if available) to fetch the credential/password;
+   * otherwise fall back to the plain value stored in the configuration.
+   * @param conf configuration to read the credential from
+   * @param name name of the password/credential property
+   * @return the resolved password, or null if it is not set
+   */
+  public String getPassword(Configuration conf, String name) throws IOException;
+
 }
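
Taken together, these additions give callers one entry point for permission
checks that behaves sensibly across Hadoop versions: Hadoop23Shims forwards
to FileSystem.access() when that method exists, and every other shim falls
back to DefaultFileAccess. A hedged usage sketch (the path below is
hypothetical, not taken from this commit):

    import java.security.AccessControlException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.hive.shims.ShimLoader;

    public class AccessCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical warehouse directory used only for illustration.
        FileStatus stat = fs.getFileStatus(new Path("/tmp/some_table_dir"));
        try {
          ShimLoader.getHadoopShims().checkFileAccess(fs, stat, FsAction.WRITE);
          System.out.println("write permitted");
        } catch (AccessControlException ace) {
          System.out.println("write denied: " + ace.getMessage());
        }
      }
    }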