Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/30 13:31:52 UTC

svn commit: r1635477 [16/17] - in /hive/branches/spark: common/src/java/org/apache/hadoop/hive/common/ data/conf/spark/ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/counter/ ql/src/java/org/apache/h...
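
The recurring edit across the golden files below is the removal of the "[Error 30017]: Skipping stats aggregation" line from the expected PREHOOK output: with this revision, counter-based stats aggregation evidently no longer fails on the spark branch. As a minimal sketch (not taken from this commit), assuming a test session with counter-based stats enabled via hive.stats.dbclass=counter, this is the shape of statement these .q tests run:

  -- hedged sketch; hive.stats.dbclass=counter is an assumed session setting
  set hive.stats.dbclass=counter;
  create table timestamp_1 (t timestamp);
  -- the INSERT whose PREHOOK output previously carried the [Error 30017] line
  insert overwrite table timestamp_1
    select cast('2011-01-01 01:01:01' as timestamp) from src tablesample (1 rows);
  -- with aggregation succeeding, numRows/rawDataSize get populated
  describe formatted timestamp_1;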

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_1.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_1.q.out Thu Oct 30 12:31:47 2014
@@ -23,7 +23,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_1
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_1
   select cast('2011-01-01 01:01:01' as timestamp) from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -107,7 +106,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_1
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_1
   select '2011-01-01 01:01:01' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -191,7 +189,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_1
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_1
   select '2011-01-01 01:01:01.1' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -275,7 +272,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_1
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_1
   select '2011-01-01 01:01:01.0001' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -359,7 +355,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_1
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_1
   select '2011-01-01 01:01:01.000100000' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -443,7 +438,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_1
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_1
   select '2011-01-01 01:01:01.001000011' from src tablesample (1 rows)
 POSTHOOK: type: QUERY

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_2.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_2.q.out Thu Oct 30 12:31:47 2014
@@ -23,7 +23,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_2
   select cast('2011-01-01 01:01:01' as timestamp) from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -107,7 +106,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_2
   select '2011-01-01 01:01:01' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -191,7 +189,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_2
   select '2011-01-01 01:01:01.1' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -275,7 +272,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_2
   select '2011-01-01 01:01:01.0001' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -359,7 +355,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_2
   select '2011-01-01 01:01:01.000100000' from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -443,7 +438,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_2
   select '2011-01-01 01:01:01.001000011' from src tablesample (1 rows)
 POSTHOOK: type: QUERY

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_3.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_3.q.out Thu Oct 30 12:31:47 2014
@@ -23,7 +23,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_3
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_3
   select cast(cast('1.3041352164485E9' as double) as timestamp) from src tablesample (1 rows)
 POSTHOOK: type: QUERY

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_lazy.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_lazy.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_lazy.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_lazy.q.out Thu Oct 30 12:31:47 2014
@@ -14,7 +14,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_lazy
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table timestamp_lazy select cast('2011-01-01 01:01:01' as timestamp), key, value from src tablesample (5 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out Thu Oct 30 12:31:47 2014
@@ -31,8 +31,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@timestamp_udf
 PREHOOK: Output: default@timestamp_udf_string
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: from (select * from src tablesample (1 rows)) s
   insert overwrite table timestamp_udf 
     select '2011-05-06 07:08:09.1234567'

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/transform1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/transform1.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/transform1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/transform1.q.out Thu Oct 30 12:31:47 2014
@@ -72,7 +72,6 @@ select array(1,2,3) from src tablesample
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@transform1_t2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table transform1_t2
 select array(1,2,3) from src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -98,21 +97,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: transform1_t2
-                  Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: '012' (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                     Transform Operator
                       command: cat
                       output info:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out Thu Oct 30 12:31:47 2014
@@ -175,7 +175,6 @@ PREHOOK: query: insert overwrite table t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@tmptable
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmptable
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
                                         UNION  ALL  

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out Thu Oct 30 12:31:47 2014
@@ -161,8 +161,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@dest1
 PREHOOK: Output: default@dest2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
       select s2.key as key, s2.value as value from src s2) unionsrc

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out Thu Oct 30 12:31:47 2014
@@ -188,8 +188,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@dest1
 PREHOOK: Output: default@dest2
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
       select s2.key as key, s2.value as value from src s2) unionsrc

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union25.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union25.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union25.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union25.q.out Thu Oct 30 12:31:47 2014
@@ -12,7 +12,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@tmp_srcpart@ds=2008-04-08/hr=11
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmp_srcpart partition (ds='2008-04-08', hr='11')
 select key, value from srcpart where ds='2008-04-08' and hr='11'
 POSTHOOK: type: QUERY
@@ -159,14 +158,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 279 Data size: 5562 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col2 (type: bigint), _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 279 Data size: 5562 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 279 Data size: 5562 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out Thu Oct 30 12:31:47 2014
@@ -185,7 +185,6 @@ select * from (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_subq_union
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_subq_union 
 select * from (
   select key, value from src 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out Thu Oct 30 12:31:47 2014
@@ -135,7 +135,6 @@ select * from (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_subq_union
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_subq_union 
 select * from (
   select key, value from src 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out Thu Oct 30 12:31:47 2014
@@ -238,7 +238,6 @@ FROM (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_out
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_out 
 SELECT *
 FROM (

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out Thu Oct 30 12:31:47 2014
@@ -232,7 +232,6 @@ select key, value from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_subq_union
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_subq_union 
 select * from (
 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out Thu Oct 30 12:31:47 2014
@@ -151,7 +151,6 @@ UNION ALL
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test_src
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
 	SELECT key, value FROM src 
@@ -311,7 +310,6 @@ UNION ALL
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test_src
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
 	SELECT key, COUNT(*) AS value FROM src

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out Thu Oct 30 12:31:47 2014
@@ -135,7 +135,6 @@ select unionsrc.key, unionsrc.value FROM
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@tmptable
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
                                         UNION  ALL  

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out Thu Oct 30 12:31:47 2014
@@ -117,7 +117,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
 PREHOOK: Output: default@tmptable
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                                       UNION  ALL  

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_between_in.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_between_in.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_between_in.q.out Thu Oct 30 12:31:47 2014
@@ -3,7 +3,6 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@decimal_date_test
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
@@ -28,28 +27,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 1952 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
-                    Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -81,12 +80,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 1952 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean)
-                    Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
@@ -142,28 +141,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 976 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
-                    Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal1 (type: decimal(20,10))
                       outputColumnNames: _col0
-                      Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(20,10))
                         sort order: +
-                        Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(20,10))
                 outputColumnNames: _col0
-                Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -195,12 +194,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 976 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (not (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568)) (type: boolean)
-                    Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
@@ -256,28 +255,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 1952 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
-                    Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -309,28 +308,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 1952 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean)
-                    Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 976 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -362,28 +361,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 976 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: cdecimal1 BETWEEN -20 AND 45.9918918919 (type: boolean)
-                    Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal1 (type: decimal(20,10))
                       outputColumnNames: _col0
-                      Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(20,10))
                         sort order: +
-                        Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(20,10))
                 outputColumnNames: _col0
-                Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -415,12 +414,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_date_test
-                  Statistics: Num rows: 976 Data size: 109320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
-                    Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Statistics: Num rows: 488 Data size: 54660 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out Thu Oct 30 12:31:47 2014
@@ -82,7 +82,6 @@ PREHOOK: query: INSERT INTO TABLE over1k
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k
 PREHOOK: Output: default@over1korc
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
@@ -128,22 +127,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over1korc
-                  Statistics: Num rows: 5898 Data size: 23594 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: i (type: int)
                     outputColumnNames: i
-                    Statistics: Num rows: 5898 Data size: 23594 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: avg(50), avg(UDFToDouble(50)), avg(CAST( 50 AS decimal(10,0)))
                       keys: i (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 5898 Data size: 23594 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 5898 Data size: 23594 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
             Execution mode: vectorized
         Reducer 2 
@@ -153,28 +152,28 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2949 Data size: 11797 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 2949 Data size: 11797 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: int)
                     sort order: +
-                    Statistics: Num rows: 2949 Data size: 11797 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2949 Data size: 11797 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_char_4.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_char_4.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_char_4.q.out Thu Oct 30 12:31:47 2014
@@ -92,7 +92,6 @@ PREHOOK: query: INSERT INTO TABLE vector
 PREHOOK: type: QUERY
 PREHOOK: Input: default@vectortab2k
 PREHOOK: Output: default@vectortab2korc
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2k
@@ -143,14 +142,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vectortab2korc
-                  Statistics: Num rows: 731 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: CAST( t AS CHAR(10) (type: char(10)), CAST( si AS CHAR(10) (type: char(10)), CAST( i AS CHAR(20) (type: char(20)), CAST( b AS CHAR(30) (type: char(30)), CAST( f AS CHAR(20) (type: char(20)), CAST( d AS CHAR(20) (type: char(20)), CAST( s AS CHAR(50) (type: char(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Statistics: Num rows: 731 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 731 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out Thu Oct 30 12:31:47 2014
@@ -197,7 +197,6 @@ select ws_sold_date_sk, ws_sold_time_sk,
 PREHOOK: type: QUERY
 PREHOOK: Input: default@web_sales_txt
 PREHOOK: Output: default@web_sales
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table web_sales
 partition (ws_web_site_sk)
 select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
@@ -1255,21 +1254,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: web_sales
-                  Statistics: Num rows: 58981 Data size: 235968 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ws_order_number (type: int)
                     outputColumnNames: ws_order_number
-                    Statistics: Num rows: 58981 Data size: 235968 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT ws_order_number)
                       keys: ws_order_number (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 58981 Data size: 235968 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
-                        Statistics: Num rows: 58981 Data size: 235968 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_data_types.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_data_types.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_data_types.q.out Thu Oct 30 12:31:47 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out Thu Oct 30 12:31:47 2014
@@ -7,7 +7,6 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@decimal_vgby
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS 
     SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, 
     CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
@@ -48,22 +47,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_vgby
-                  Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                     outputColumnNames: cint, cdecimal1, cdecimal2
-                    Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
                       keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                      Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)), _col9 (type: bigint)
             Execution mode: vectorized
         Reducer 2 
@@ -73,17 +72,17 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                Statistics: Num rows: 279 Data size: 63714 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col9 > 1) (type: boolean)
-                  Statistics: Num rows: 93 Data size: 21238 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Statistics: Num rows: 93 Data size: 21238 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 93 Data size: 21238 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -153,22 +152,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_vgby
-                  Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                     outputColumnNames: cint, cdecimal1, cdecimal2
-                    Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), avg(cdecimal1), stddev_pop(cdecimal1), stddev_samp(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), avg(cdecimal2), stddev_pop(cdecimal2), stddev_samp(cdecimal2), count()
                       keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                      Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 559 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: struct<count:bigint,sum:decimal(33,14),input:decimal(23,14)>), _col13 (type: struct<count:bigint,sum:double,variance:double>), _col14 (type: struct<count:bigint,sum:double,variance:double>), _col15 (type: bigint)
             Execution mode: vectorized
         Reducer 2 
@@ -178,17 +177,17 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                Statistics: Num rows: 279 Data size: 63714 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col15 > 1) (type: boolean)
-                  Statistics: Num rows: 93 Data size: 21238 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: decimal(24,14)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: decimal(27,18)), _col13 (type: double), _col14 (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                    Statistics: Num rows: 93 Data size: 21238 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 93 Data size: 21238 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

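[Editor's note on the two kinds of changes in this file: the dropped [Error 30017] lines reflect that counter-based stats publishing now succeeds on the Spark engine, and the revised Statistics lines reflect that the plans now see the true row count of the source data (12288 rows for tables derived from alltypesorc) rather than an estimate derived from file size. A minimal sketch of how a q-file exercises this path follows; the property names are real Hive settings, but the exact combination used by each test may differ:

    -- hedged sketch: enable counter-based stats on the Spark engine
    set hive.execution.engine=spark;
    set hive.stats.dbclass=counter;
    set hive.vectorized.execution.enabled=true;

    -- basic stats recorded by the CTAS feed the Num rows figures in later EXPLAINs
    EXPLAIN
    SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1)
    FROM decimal_vgby
    GROUP BY cint;
]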
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out Thu Oct 30 12:31:47 2014
@@ -9,7 +9,6 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@decimal_mapjoin
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 CREATE TABLE decimal_mapjoin STORED AS ORC AS 
@@ -46,28 +45,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: r
-                  Statistics: Num rows: 1100 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (cint is not null and (cint = 6981)) (type: boolean)
-                    Statistics: Num rows: 275 Data size: 31914 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3072 Data size: 541265 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: 6981 (type: int)
                       sort order: +
-                      Statistics: Num rows: 275 Data size: 31914 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3072 Data size: 541265 Basic stats: COMPLETE Column stats: NONE
                       value expressions: cdecimal2 (type: decimal(23,14))
             Execution mode: vectorized
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: l
-                  Statistics: Num rows: 1100 Data size: 127658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (cint is not null and (cint = 6981)) (type: boolean)
-                    Statistics: Num rows: 275 Data size: 31914 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3072 Data size: 541265 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: 6981 (type: int)
                       sort order: +
-                      Statistics: Num rows: 275 Data size: 31914 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3072 Data size: 541265 Basic stats: COMPLETE Column stats: NONE
                       value expressions: cdecimal1 (type: decimal(20,10))
             Execution mode: vectorized
         Reducer 2 
@@ -79,14 +78,14 @@ STAGE PLANS:
                   0 {VALUE._col1}
                   1 {VALUE._col2}
                 outputColumnNames: _col1, _col9
-                Statistics: Num rows: 302 Data size: 35105 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3379 Data size: 595391 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14))
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 302 Data size: 35105 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3379 Data size: 595391 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 302 Data size: 35105 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3379 Data size: 595391 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

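[Editor's note: the join-output Statistics above are consistent with Hive scaling the larger join input by 1.1 when column stats are absent: max(3072, 3072) x 1.1 = 3379.2, truncated to 3379 rows, and 541265 x 1.1 = 595391.5, truncated to 595391 bytes. The old 302-row / 35105-byte figures follow the same rule from 275 rows / 31914 bytes.]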
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out Thu Oct 30 12:31:47 2014
@@ -84,7 +84,6 @@ PREHOOK: query: INSERT INTO TABLE vector
 PREHOOK: type: QUERY
 PREHOOK: Input: default@vectortab2k
 PREHOOK: Output: default@vectortab2korc
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2k
@@ -124,22 +123,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vectortab2korc
-                  Statistics: Num rows: 8043 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: bo (type: boolean), b (type: bigint)
                     outputColumnNames: bo, b
-                    Statistics: Num rows: 8043 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(b)
                       keys: bo (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 8043 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Statistics: Num rows: 8043 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
         Reducer 2 
@@ -149,15 +148,15 @@ STAGE PLANS:
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 4021 Data size: 48252 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: boolean), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 4021 Data size: 48252 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: boolean)
                     sort order: -
-                    Statistics: Num rows: 4021 Data size: 48252 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint)
             Execution mode: vectorized
         Reducer 3 
@@ -165,10 +164,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: bigint)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 4021 Data size: 48252 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 4021 Data size: 48252 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

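[Editor's note: vectortab2korc now reports its true size (2000 rows, matching the 2k source table) instead of the earlier 8043-row estimate. Should the counter-collected stats from the INSERT ever be lost, equivalent figures could be restored explicitly; a hedged sketch:

    -- hedged sketch: recompute basic table stats so EXPLAIN sees accurate Num rows
    ANALYZE TABLE vectortab2korc COMPUTE STATISTICS;
    DESCRIBE FORMATTED vectortab2korc;  -- table parameters include numRows and rawDataSize
]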
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out Thu Oct 30 12:31:47 2014
@@ -82,7 +82,6 @@ PREHOOK: query: INSERT INTO TABLE over1k
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k
 PREHOOK: Output: default@over1korc
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
@@ -118,14 +117,14 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: over1korc
-          Statistics: Num rows: 235 Data size: 23594 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 235 Data size: 23594 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Limit
               Number of rows: 20
-              Statistics: Num rows: 20 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: SELECT s AS `string`,

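[Editor's note: the revised Limit line follows directly from the scan's new averages: floor(311170 / 1049) = 296 bytes per row, and 296 x 20 rows = 5920 bytes. The old figure of 2000 came the same way from the 235-row estimate: floor(23594 / 235) = 100 bytes per row, x 20 = 2000.]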
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out Thu Oct 30 12:31:47 2014
@@ -92,7 +92,6 @@ PREHOOK: query: INSERT INTO TABLE vector
 PREHOOK: type: QUERY
 PREHOOK: Input: default@vectortab2k
 PREHOOK: Output: default@vectortab2korc
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2k
@@ -143,14 +142,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vectortab2korc
-                  Statistics: Num rows: 731 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: CAST( t AS varchar(10)) (type: varchar(10)), CAST( si AS varchar(10)) (type: varchar(10)), CAST( i AS varchar(20)) (type: varchar(20)), CAST( b AS varchar(30)) (type: varchar(30)), CAST( f AS varchar(20)) (type: varchar(20)), CAST( d AS varchar(20)) (type: varchar(20)), CAST( s AS varchar(50)) (type: varchar(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Statistics: Num rows: 731 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 731 Data size: 96518 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out Thu Oct 30 12:31:47 2014
@@ -3,7 +3,6 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@date_decimal_test
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
@@ -23,17 +22,17 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: date_decimal_test
-          Statistics: Num rows: 482 Data size: 86777 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 12288 Data size: 1651260 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (cint is not null and cdouble is not null) (type: boolean)
-            Statistics: Num rows: 121 Data size: 21784 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3072 Data size: 412815 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cdate (type: date), cdecimal (type: decimal(20,10))
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 121 Data size: 21784 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3072 Data size: 412815 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 1800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 10 Data size: 1340 Basic stats: COMPLETE Column stats: NONE
                 ListSink
 
 PREHOOK: query: SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part.q.out Thu Oct 30 12:31:47 2014
@@ -10,7 +10,6 @@ PREHOOK: query: insert overwrite table a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_part@ds=2011
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
@@ -31,7 +30,6 @@ PREHOOK: query: insert overwrite table a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_part@ds=2012
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc

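[Editor's note: with counter stats working on Spark, each of these partitioned inserts now records per-partition basic stats instead of skipping aggregation with [Error 30017]. A hedged sketch of how that could be verified:

    -- hedged sketch: inspect the per-partition stats recorded by the insert
    DESCRIBE FORMATTED alltypesorc_part PARTITION (ds='2011');
    -- the partition parameters would be expected to show numRows=100,
    -- matching the insert's LIMIT 100
]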
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out?rev=1635477&r1=1635476&r2=1635477&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out Thu Oct 30 12:31:47 2014
@@ -10,7 +10,6 @@ PREHOOK: query: insert overwrite table a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_part@ds=2011
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
@@ -31,7 +30,6 @@ PREHOOK: query: insert overwrite table a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_part@ds=2012
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
@@ -67,28 +65,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: alltypesorc_part
-                  Statistics: Num rows: 706 Data size: 5648 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: (cdouble + 2) (type: double)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 706 Data size: 5648 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: double)
                       sort order: +
-                      Statistics: Num rows: 706 Data size: 5648 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double)
                 outputColumnNames: _col0
-                Statistics: Num rows: 706 Data size: 5648 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat