Posted to commits@hive.apache.org by xu...@apache.org on 2014/09/22 19:59:43 UTC

svn commit: r1626859 - in /hive/branches/spark: itests/src/test/resources/testconfiguration.properties ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out

Author: xuefu
Date: Mon Sep 22 17:59:42 2014
New Revision: 1626859

URL: http://svn.apache.org/r1626859
Log:
HIVE-7842: Enable qtest load_dyn_part1.q [Spark Branch] (Venki via Xuefu)

Modified:
    hive/branches/spark/itests/src/test/resources/testconfiguration.properties
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out

Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1626859&r1=1626858&r2=1626859&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Mon Sep 22 17:59:42 2014
@@ -405,8 +405,9 @@ spark.query.files=alter_merge_orc.q \
   join_1to1.q \
   join_casesensitive.q \
   limit_pushdown.q \
-  load_dyn_part2.q.out \
-  load_dyn_part3.q.out \
+  load_dyn_part1.q \
+  load_dyn_part2.q \
+  load_dyn_part3.q \
   mapreduce1.q \
   mapreduce2.q \
   merge1.q \

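Listing a .q file under spark.query.files in testconfiguration.properties is what lets the Spark qtest driver pick it up. As a rough sketch (the exact module and flags are assumptions and may differ by branch), a newly enabled test such as load_dyn_part1.q can usually be run on its own from the itests tree:

    # assumed invocation; run from the Hive source checkout
    cd itests
    mvn test -Dtest=TestSparkCliDriver -Dqfile=load_dyn_part1.q
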
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out?rev=1626859&r1=1626858&r2=1626859&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out Mon Sep 22 17:59:42 2014
@@ -55,23 +55,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract
-                Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out?rev=1626859&r1=1626858&r2=1626859&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out Mon Sep 22 17:59:42 2014
@@ -61,24 +61,24 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col2 (type: string), _col3 (type: string)
                       sort order: ++
                       Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                      Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract
-                Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
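
The .q.out changes above are golden-file updates reflecting revised Statistics estimates (Num rows / Data size) for the srcpart scan. As a hedged sketch of the usual workflow (flags are assumptions and may vary by branch), such golden files are regenerated by rerunning the affected tests with output overwriting enabled and then reviewing the resulting diff before committing:

    # assumed invocation; regenerates the .q.out files under clientpositive/spark
    cd itests
    mvn test -Dtest=TestSparkCliDriver -Dqfile=load_dyn_part2.q,load_dyn_part3.q -Dtest.output.overwrite=true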