Posted to commits@hive.apache.org by na...@apache.org on 2010/10/27 21:00:05 UTC

svn commit: r1028072 [3/7] - in /hive/trunk: ./ data/files/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/parse/ ql/src/java/org/apache/hadoop/hive/ql/plan/ ql/src/test/queries/clientnegative/ ql/src/test/queries/cli...

Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin4.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin4.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin4.q.out Wed Oct 27 19:00:02 2010
@@ -104,9 +104,7 @@ STAGE PLANS:
           TableScan
             alias: b
             GatherStats: false
-            Common Join Operator
-              condition map:
-                   Inner Join 0 to 1
+            JDBM Sink Operator
               condition expressions:
                 0 {key} {value}
                 1 {value}
@@ -114,16 +112,15 @@ STAGE PLANS:
               keys:
                 0 [Column[key]]
                 1 [Column[key]]
-              outputColumnNames: _col0, _col1, _col5
               Position of Big Table: 0
       Bucket Mapjoin Context:
           Alias Bucket Base File Name Mapping:
             b {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt]}
           Alias Bucket File Name Mapping:
-            b {pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt=[pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt=[pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
+            b {pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt=[pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt=[pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
           Alias Bucket Output File Name Mapping:
-            pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt 0
-            pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt 1
+            pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt 0
+            pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt 1
 
   Stage: Stage-1
     Map Reduce
@@ -132,7 +129,7 @@ STAGE PLANS:
           TableScan
             alias: a
             GatherStats: false
-            Common Join Operator
+            Map Join Operator
               condition map:
                    Inner Join 0 to 1
               condition expressions:
@@ -165,9 +162,9 @@ STAGE PLANS:
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
-                    directory: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10002
+                    directory: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10002
                     NumFilesPerFileSink: 1
-                    Stats Publishing Key Prefix: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10000/
+                    Stats Publishing Key Prefix: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10000/
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -177,12 +174,12 @@ STAGE PLANS:
                           columns.types string:string:string
                           file.inputformat org.apache.hadoop.mapred.TextInputFormat
                           file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                          location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                           name bucketmapjoin_tmp_result
                           serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                           serialization.format 1
                           serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          transient_lastDdlTime 1287175837
+                          transient_lastDdlTime 1288205575
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: bucketmapjoin_tmp_result
                     TotalFiles: 1
@@ -192,9 +189,9 @@ STAGE PLANS:
         Map Reduce Local Work
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin [a]
+        pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin [a]
       Path -> Partition:
-        pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin 
+        pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin 
           Partition
             base file name: srcbucket_mapjoin
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -206,12 +203,12 @@ STAGE PLANS:
               columns.types int:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin
+              location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin
               name srcbucket_mapjoin
               serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1287175830
+              transient_lastDdlTime 1288205572
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -223,12 +220,12 @@ STAGE PLANS:
                 columns.types int:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin
+                location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin
                 name srcbucket_mapjoin
                 serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1287175830
+                transient_lastDdlTime 1288205572
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcbucket_mapjoin
             name: srcbucket_mapjoin
@@ -240,14 +237,14 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          source: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10002
-          destination: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10000
+          source: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10002
+          destination: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10000
 
   Stage: Stage-0
     Move Operator
       tables:
           replace: true
-          source: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10000
+          source: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -257,28 +254,28 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1287175837
+                transient_lastDdlTime 1288205575
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
-          tmp directory: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10001
+          tmp directory: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10001
 
   Stage: Stage-2
     Stats-Aggr Operator
-      Stats Aggregation Key Prefix: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10000/
+      Stats Aggregation Key Prefix: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10000/
 
   Stage: Stage-3
     Map Reduce
       Alias -> Map Operator Tree:
-        pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10002 
+        pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10002 
             File Output Operator
               compressed: false
               GlobalTableId: 0
-              directory: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10000
+              directory: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10000
               NumFilesPerFileSink: 1
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
@@ -289,12 +286,12 @@ STAGE PLANS:
                     columns.types string:string:string
                     file.inputformat org.apache.hadoop.mapred.TextInputFormat
                     file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                    location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                     name bucketmapjoin_tmp_result
                     serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    transient_lastDdlTime 1287175837
+                    transient_lastDdlTime 1288205575
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: bucketmapjoin_tmp_result
               TotalFiles: 1
@@ -302,9 +299,9 @@ STAGE PLANS:
               MultiFileSpray: false
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10002 [pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10002]
+        pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10002 [pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10002]
       Path -> Partition:
-        pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-50-37_713_8397106184600203842/-ext-10002 
+        pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-52-55_191_6284354312574445049/-ext-10002 
           Partition
             base file name: -ext-10002
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -315,12 +312,12 @@ STAGE PLANS:
               columns.types string:string:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+              location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
               name bucketmapjoin_tmp_result
               serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1287175837
+              transient_lastDdlTime 1288205575
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -331,12 +328,12 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1287175837
+                transient_lastDdlTime 1288205575
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
             name: bucketmapjoin_tmp_result
@@ -362,11 +359,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-50-50_080_3250358574001886106/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-02_738_4577457943531711045/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-50-50_080_3250358574001886106/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-02_738_4577457943531711045/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin)b.FieldSchema(name:value, type:string, comment:null), ]
@@ -413,11 +410,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-12_750_5190209184978381690/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-16_429_5837890045100564683/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-12_750_5190209184978381690/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-16_429_5837890045100564683/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
@@ -456,14 +453,14 @@ on a.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_hash_result_1
 PREHOOK: Input: default@bucketmapjoin_hash_result_2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-22_308_4772430048114309901/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-24_119_5713666769406989600/-mr-10000
 POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
 from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
 on a.key = b.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_hash_result_1
 POSTHOOK: Input: default@bucketmapjoin_hash_result_2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-22_308_4772430048114309901/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-24_119_5713666769406989600/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
@@ -525,9 +522,7 @@ STAGE PLANS:
           TableScan
             alias: a
             GatherStats: false
-            Common Join Operator
-              condition map:
-                   Inner Join 0 to 1
+            JDBM Sink Operator
               condition expressions:
                 0 {key} {value}
                 1 {value}
@@ -535,16 +530,15 @@ STAGE PLANS:
               keys:
                 0 [Column[key]]
                 1 [Column[key]]
-              outputColumnNames: _col0, _col1, _col5
               Position of Big Table: 1
       Bucket Mapjoin Context:
           Alias Bucket Base File Name Mapping:
             a {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt]}
           Alias Bucket File Name Mapping:
-            a {pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt=[pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt=[pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
+            a {pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt=[pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt=[pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
           Alias Bucket Output File Name Mapping:
-            pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt 0
-            pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt 1
+            pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt 0
+            pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt 1
 
   Stage: Stage-1
     Map Reduce
@@ -553,7 +547,7 @@ STAGE PLANS:
           TableScan
             alias: b
             GatherStats: false
-            Common Join Operator
+            Map Join Operator
               condition map:
                    Inner Join 0 to 1
               condition expressions:
@@ -586,9 +580,9 @@ STAGE PLANS:
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
-                    directory: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10002
+                    directory: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10002
                     NumFilesPerFileSink: 1
-                    Stats Publishing Key Prefix: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10000/
+                    Stats Publishing Key Prefix: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10000/
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -598,7 +592,7 @@ STAGE PLANS:
                           columns.types string:string:string
                           file.inputformat org.apache.hadoop.mapred.TextInputFormat
                           file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                          location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                           name bucketmapjoin_tmp_result
                           numFiles 1
                           numPartitions 0
@@ -607,7 +601,7 @@ STAGE PLANS:
                           serialization.format 1
                           serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                           totalSize 8983
-                          transient_lastDdlTime 1287175872
+                          transient_lastDdlTime 1288205596
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: bucketmapjoin_tmp_result
                     TotalFiles: 1
@@ -617,9 +611,9 @@ STAGE PLANS:
         Map Reduce Local Work
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin [b]
+        pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin [b]
       Path -> Partition:
-        pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin 
+        pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin 
           Partition
             base file name: srcbucket_mapjoin
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -631,12 +625,12 @@ STAGE PLANS:
               columns.types int:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin
+              location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin
               name srcbucket_mapjoin
               serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1287175830
+              transient_lastDdlTime 1288205572
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -648,12 +642,12 @@ STAGE PLANS:
                 columns.types int:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/srcbucket_mapjoin
+                location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/srcbucket_mapjoin
                 name srcbucket_mapjoin
                 serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1287175830
+                transient_lastDdlTime 1288205572
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcbucket_mapjoin
             name: srcbucket_mapjoin
@@ -665,14 +659,14 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          source: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10002
-          destination: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10000
+          source: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10002
+          destination: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10000
 
   Stage: Stage-0
     Move Operator
       tables:
           replace: true
-          source: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10000
+          source: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -682,7 +676,7 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 numFiles 1
                 numPartitions 0
@@ -691,23 +685,23 @@ STAGE PLANS:
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 8983
-                transient_lastDdlTime 1287175872
+                transient_lastDdlTime 1288205596
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
-          tmp directory: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10001
+          tmp directory: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10001
 
   Stage: Stage-2
     Stats-Aggr Operator
-      Stats Aggregation Key Prefix: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10000/
+      Stats Aggregation Key Prefix: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10000/
 
   Stage: Stage-3
     Map Reduce
       Alias -> Map Operator Tree:
-        pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10002 
+        pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10002 
             File Output Operator
               compressed: false
               GlobalTableId: 0
-              directory: pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10000
+              directory: pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10000
               NumFilesPerFileSink: 1
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
@@ -718,7 +712,7 @@ STAGE PLANS:
                     columns.types string:string:string
                     file.inputformat org.apache.hadoop.mapred.TextInputFormat
                     file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                    location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                     name bucketmapjoin_tmp_result
                     numFiles 1
                     numPartitions 0
@@ -727,7 +721,7 @@ STAGE PLANS:
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 8983
-                    transient_lastDdlTime 1287175872
+                    transient_lastDdlTime 1288205596
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: bucketmapjoin_tmp_result
               TotalFiles: 1
@@ -735,9 +729,9 @@ STAGE PLANS:
               MultiFileSpray: false
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10002 [pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10002]
+        pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10002 [pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10002]
       Path -> Partition:
-        pfile:/data/users/liyintang/trunk_os/build/ql/scratchdir/hive_2010-10-15_13-51-26_294_4777526524357803096/-ext-10002 
+        pfile:/data/users/njain/hive-commit1/build/ql/scratchdir/hive_2010-10-27_11-53-27_274_740197042204524306/-ext-10002 
           Partition
             base file name: -ext-10002
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -748,7 +742,7 @@ STAGE PLANS:
               columns.types string:string:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+              location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
               name bucketmapjoin_tmp_result
               numFiles 1
               numPartitions 0
@@ -757,7 +751,7 @@ STAGE PLANS:
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 8983
-              transient_lastDdlTime 1287175872
+              transient_lastDdlTime 1288205596
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -768,7 +762,7 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/liyintang/trunk_os/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/data/users/njain/hive-commit1/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 numFiles 1
                 numPartitions 0
@@ -777,7 +771,7 @@ STAGE PLANS:
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 8983
-                transient_lastDdlTime 1287175872
+                transient_lastDdlTime 1288205596
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
             name: bucketmapjoin_tmp_result
@@ -815,11 +809,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-38_519_5227176789157682406/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-34_796_7433622090937468625/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-38_519_5227176789157682406/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-34_796_7433622090937468625/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
@@ -902,11 +896,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-59_233_5568038317944335995/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-48_705_2477192820764361821/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-51-59_233_5568038317944335995/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-48_705_2477192820764361821/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
@@ -969,14 +963,14 @@ on a.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_hash_result_1
 PREHOOK: Input: default@bucketmapjoin_hash_result_2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-52-08_602_2192616005048999053/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-55_192_5237681680689076654/-mr-10000
 POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
 from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
 on a.key = b.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_hash_result_1
 POSTHOOK: Input: default@bucketmapjoin_hash_result_2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-10-15_13-52-08_602_2192616005048999053/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-27_11-53-55_192_5237681680689076654/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]

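
A note on the plan change above: the generic "Common Join Operator" is split into two specialized operators. The small-table side becomes a "JDBM Sink Operator" in the Map Reduce Local Work, which appears to build the small table's hash table (JDBM being the persistent hashtable implementation it spills to) and therefore no longer carries the join's outputColumnNames, while the big-table side becomes a "Map Join Operator" that probes that hash table. A minimal sketch of the kind of query that produces this plan shape, assuming the srcbucket_mapjoin self-join setup of bucketmapjoin4.q (the test script itself is not part of this excerpt):

    set hive.optimize.bucketmapjoin = true;

    insert overwrite table bucketmapjoin_tmp_result
    select /*+ MAPJOIN(b) */ a.key, a.value, b.value
    from srcbucket_mapjoin a join srcbucket_mapjoin b on a.key = b.key;

With MAPJOIN(b), alias a is the big table ("Position of Big Table: 0" in the first plan); the second half of the diff is the mirrored run hinting MAPJOIN(a), where "Position of Big Table: 1".
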
Added: hive/trunk/ql/src/test/results/clientpositive/count.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/count.q.out?rev=1028072&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/count.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/count.q.out Wed Oct 27 19:00:02 2010
@@ -0,0 +1,577 @@
+PREHOOK: query: create table abcd (a int, b int, c int, d int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table abcd (a int, b int, c int, d int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@abcd
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in4.txt' INTO TABLE abcd
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/in4.txt' INTO TABLE abcd
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@abcd
+PREHOOK: query: select * from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-12_523_248548306762990690/-mr-10000
+POSTHOOK: query: select * from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-12_523_248548306762990690/-mr-10000
+NULL	35	23	6
+10	1000	50	1
+100	100	10	3
+12	NULL	80	2
+10	100	NULL	5
+10	100	45	4
+12	100	75	7
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF abcd)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL d)))) (TOK_GROUPBY (TOK_TABLE_OR_COL a))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        abcd 
+          TableScan
+            alias: abcd
+            Select Operator
+              expressions:
+                    expr: a
+                    type: int
+                    expr: b
+                    type: int
+                    expr: c
+                    type: int
+                    expr: d
+                    type: int
+              outputColumnNames: a, b, c, d
+              Group By Operator
+                aggregations:
+                      expr: count(DISTINCT b)
+                      expr: count(DISTINCT c)
+                      expr: sum(d)
+                bucketGroup: false
+                keys:
+                      expr: a
+                      type: int
+                      expr: b
+                      type: int
+                      expr: c
+                      type: int
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: int
+                        expr: _col1
+                        type: int
+                        expr: _col2
+                        type: int
+                  sort order: +++
+                  Map-reduce partition columns:
+                        expr: _col0
+                        type: int
+                  tag: -1
+                  value expressions:
+                        expr: _col3
+                        type: bigint
+                        expr: _col4
+                        type: bigint
+                        expr: _col5
+                        type: bigint
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: count(DISTINCT KEY._col1:1._col0)
+                expr: sum(VALUE._col2)
+          bucketGroup: false
+          keys:
+                expr: KEY._col0
+                type: int
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: int
+                  expr: _col1
+                  type: bigint
+                  expr: _col2
+                  type: bigint
+                  expr: _col3
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-12_925_5142539455043997266/-mr-10000
+POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-12_925_5142539455043997266/-mr-10000
+NULL	1	1	6
+10	2	2	10
+12	1	2	9
+100	1	1	3
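
Checking that output against the rows listed at the top of this file: for a = 10 the three rows are (10, 1000, 50, 1), (10, 100, NULL, 5) and (10, 100, 45, 4), so count(distinct b) sees {1000, 100} = 2, count(distinct c) skips the NULL and sees {50, 45} = 2, and sum(d) = 1 + 5 + 4 = 10, matching the "10 2 2 10" row. A quick spot-check of a single group:

    select a, count(distinct b), count(distinct c), sum(d)
    from abcd
    where a = 10
    group by a;
    -- expected: 10   2   2   10
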
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF abcd)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        abcd 
+          TableScan
+            alias: abcd
+            Select Operator
+              expressions:
+                    expr: a
+                    type: int
+                    expr: b
+                    type: int
+                    expr: c
+                    type: int
+                    expr: d
+                    type: int
+              outputColumnNames: a, b, c, d
+              Group By Operator
+                aggregations:
+                      expr: count(1)
+                      expr: count()
+                      expr: count(a)
+                      expr: count(b)
+                      expr: count(c)
+                      expr: count(d)
+                      expr: count(DISTINCT a)
+                      expr: count(DISTINCT b)
+                      expr: count(DISTINCT c)
+                      expr: count(DISTINCT d)
+                      expr: count(DISTINCT a, b)
+                      expr: count(DISTINCT b, c)
+                      expr: count(DISTINCT c, d)
+                      expr: count(DISTINCT a, d)
+                      expr: count(DISTINCT a, c)
+                      expr: count(DISTINCT b, d)
+                      expr: count(DISTINCT a, b, c)
+                      expr: count(DISTINCT b, c, d)
+                      expr: count(DISTINCT a, c, d)
+                      expr: count(DISTINCT a, b, d)
+                      expr: count(DISTINCT a, b, c, d)
+                bucketGroup: false
+                keys:
+                      expr: a
+                      type: int
+                      expr: b
+                      type: int
+                      expr: c
+                      type: int
+                      expr: d
+                      type: int
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: int
+                        expr: _col1
+                        type: int
+                        expr: _col2
+                        type: int
+                        expr: _col3
+                        type: int
+                  sort order: ++++
+                  tag: -1
+                  value expressions:
+                        expr: _col4
+                        type: bigint
+                        expr: _col5
+                        type: bigint
+                        expr: _col6
+                        type: bigint
+                        expr: _col7
+                        type: bigint
+                        expr: _col8
+                        type: bigint
+                        expr: _col9
+                        type: bigint
+                        expr: _col10
+                        type: bigint
+                        expr: _col11
+                        type: bigint
+                        expr: _col12
+                        type: bigint
+                        expr: _col13
+                        type: bigint
+                        expr: _col14
+                        type: bigint
+                        expr: _col15
+                        type: bigint
+                        expr: _col16
+                        type: bigint
+                        expr: _col17
+                        type: bigint
+                        expr: _col18
+                        type: bigint
+                        expr: _col19
+                        type: bigint
+                        expr: _col20
+                        type: bigint
+                        expr: _col21
+                        type: bigint
+                        expr: _col22
+                        type: bigint
+                        expr: _col23
+                        type: bigint
+                        expr: _col24
+                        type: bigint
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(VALUE._col0)
+                expr: count(VALUE._col1)
+                expr: count(VALUE._col2)
+                expr: count(VALUE._col3)
+                expr: count(VALUE._col4)
+                expr: count(VALUE._col5)
+                expr: count(DISTINCT KEY._col0:0._col0)
+                expr: count(DISTINCT KEY._col0:1._col0)
+                expr: count(DISTINCT KEY._col0:2._col0)
+                expr: count(DISTINCT KEY._col0:3._col0)
+                expr: count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1)
+                expr: count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1)
+                expr: count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1)
+                expr: count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1)
+                expr: count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1)
+                expr: count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1)
+                expr: count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2)
+                expr: count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2)
+                expr: count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2)
+                expr: count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2)
+                expr: count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
+          bucketGroup: false
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: bigint
+                  expr: _col1
+                  type: bigint
+                  expr: _col2
+                  type: bigint
+                  expr: _col3
+                  type: bigint
+                  expr: _col4
+                  type: bigint
+                  expr: _col5
+                  type: bigint
+                  expr: _col6
+                  type: bigint
+                  expr: _col7
+                  type: bigint
+                  expr: _col8
+                  type: bigint
+                  expr: _col9
+                  type: bigint
+                  expr: _col10
+                  type: bigint
+                  expr: _col11
+                  type: bigint
+                  expr: _col12
+                  type: bigint
+                  expr: _col13
+                  type: bigint
+                  expr: _col14
+                  type: bigint
+                  expr: _col15
+                  type: bigint
+                  expr: _col16
+                  type: bigint
+                  expr: _col17
+                  type: bigint
+                  expr: _col18
+                  type: bigint
+                  expr: _col19
+                  type: bigint
+                  expr: _col20
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-15_550_8269692453214848200/-mr-10000
+POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-15_550_8269692453214848200/-mr-10000
+7	7	6	6	6	7	3	3	6	7	4	5	6	6	5	6	4	5	5	5	4
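
That single row is a compact illustration of COUNT's NULL handling over the seven rows of abcd: count(1) and count(*) count every row (7); count(a), count(b) and count(c) each return 6 because exactly one row is NULL in that column; count(d) is 7 since d has no NULLs; and the DISTINCT variants drop any row with a NULL in one of the listed columns, e.g. count(distinct a) = 3 over {10, 12, 100} and count(distinct a,b) = 4 over {(10,1000), (100,100), (10,100), (12,100)}. A minimal check of just those cases:

    select count(1), count(a), count(distinct a), count(distinct a, b)
    from abcd;
    -- expected: 7   6   3   4
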
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF abcd)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL d)))) (TOK_GROUPBY (TOK_TABLE_OR_COL a))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        abcd 
+          TableScan
+            alias: abcd
+            Select Operator
+              expressions:
+                    expr: a
+                    type: int
+                    expr: b
+                    type: int
+                    expr: c
+                    type: int
+                    expr: d
+                    type: int
+              outputColumnNames: a, b, c, d
+              Reduce Output Operator
+                key expressions:
+                      expr: a
+                      type: int
+                      expr: b
+                      type: int
+                      expr: c
+                      type: int
+                sort order: +++
+                Map-reduce partition columns:
+                      expr: a
+                      type: int
+                tag: -1
+                value expressions:
+                      expr: d
+                      type: int
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: count(DISTINCT KEY._col1:1._col0)
+                expr: sum(VALUE._col0)
+          bucketGroup: false
+          keys:
+                expr: KEY._col0
+                type: int
+          mode: complete
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: int
+                  expr: _col1
+                  type: bigint
+                  expr: _col2
+                  type: bigint
+                  expr: _col3
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-18_476_7776659377634244729/-mr-10000
+POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-18_476_7776659377634244729/-mr-10000
+NULL	1	1	6
+10	2	2	10
+12	1	2	9
+100	1	1	3
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF abcd)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        abcd 
+          TableScan
+            alias: abcd
+            Select Operator
+              expressions:
+                    expr: a
+                    type: int
+                    expr: b
+                    type: int
+                    expr: c
+                    type: int
+                    expr: d
+                    type: int
+              outputColumnNames: a, b, c, d
+              Reduce Output Operator
+                key expressions:
+                      expr: a
+                      type: int
+                      expr: b
+                      type: int
+                      expr: c
+                      type: int
+                      expr: d
+                      type: int
+                sort order: ++++
+                tag: -1
+                value expressions:
+                      expr: 1
+                      type: int
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(VALUE._col0)
+                expr: count()
+                expr: count(KEY._col0:14._col0)
+                expr: count(KEY._col0:14._col1)
+                expr: count(KEY._col0:14._col2)
+                expr: count(KEY._col0:14._col3)
+                expr: count(DISTINCT KEY._col0:0._col0)
+                expr: count(DISTINCT KEY._col0:1._col0)
+                expr: count(DISTINCT KEY._col0:2._col0)
+                expr: count(DISTINCT KEY._col0:3._col0)
+                expr: count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1)
+                expr: count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1)
+                expr: count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1)
+                expr: count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1)
+                expr: count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1)
+                expr: count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1)
+                expr: count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2)
+                expr: count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2)
+                expr: count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2)
+                expr: count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2)
+                expr: count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
+          bucketGroup: false
+          mode: complete
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: bigint
+                  expr: _col1
+                  type: bigint
+                  expr: _col2
+                  type: bigint
+                  expr: _col3
+                  type: bigint
+                  expr: _col4
+                  type: bigint
+                  expr: _col5
+                  type: bigint
+                  expr: _col6
+                  type: bigint
+                  expr: _col7
+                  type: bigint
+                  expr: _col8
+                  type: bigint
+                  expr: _col9
+                  type: bigint
+                  expr: _col10
+                  type: bigint
+                  expr: _col11
+                  type: bigint
+                  expr: _col12
+                  type: bigint
+                  expr: _col13
+                  type: bigint
+                  expr: _col14
+                  type: bigint
+                  expr: _col15
+                  type: bigint
+                  expr: _col16
+                  type: bigint
+                  expr: _col17
+                  type: bigint
+                  expr: _col18
+                  type: bigint
+                  expr: _col19
+                  type: bigint
+                  expr: _col20
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-20_818_3596074897102517680/-mr-10000
+POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-19_03-08-20_818_3596074897102517680/-mr-10000
+7	7	6	6	6	7	3	3	6	7	4	5	6	6	5	6	4	5	5	5	4

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out Wed Oct 27 19:00:02 2010
@@ -50,8 +50,8 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations:
-                expr: count(DISTINCT KEY._col1)
-                expr: sum(KEY._col1)
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: sum(KEY._col1:0._col0)
           bucketGroup: false
           keys:
                 expr: KEY._col0

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out Wed Oct 27 19:00:02 2010
@@ -64,7 +64,7 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations:
-                expr: count(DISTINCT KEY._col1)
+                expr: count(DISTINCT KEY._col1:0._col0)
                 expr: sum(VALUE._col1)
           bucketGroup: false
           keys:

Added: hive/trunk/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out?rev=1028072&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out Wed Oct 27 19:00:02 2010
@@ -0,0 +1,169 @@
+PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB dest1)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: key, value
+              Group By Operator
+                aggregations:
+                      expr: count(DISTINCT substr(value, 5))
+                      expr: sum(substr(value, 5))
+                      expr: sum(DISTINCT substr(value, 5))
+                      expr: count(value)
+                bucketGroup: false
+                keys:
+                      expr: substr(key, 1, 1)
+                      type: string
+                      expr: substr(value, 5)
+                      type: string
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                  sort order: ++
+                  Map-reduce partition columns:
+                        expr: _col0
+                        type: string
+                  tag: -1
+                  value expressions:
+                        expr: _col2
+                        type: bigint
+                        expr: _col3
+                        type: double
+                        expr: _col4
+                        type: double
+                        expr: _col5
+                        type: bigint
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: sum(VALUE._col1)
+                expr: sum(DISTINCT KEY._col1:1._col0)
+                expr: count(VALUE._col3)
+          bucketGroup: false
+          keys:
+                expr: KEY._col0
+                type: string
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: bigint
+                  expr: concat(_col0, _col2)
+                  type: string
+                  expr: _col3
+                  type: double
+                  expr: _col4
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Select Operator
+              expressions:
+                    expr: _col0
+                    type: string
+                    expr: UDFToInteger(_col1)
+                    type: int
+                    expr: _col2
+                    type: string
+                    expr: UDFToInteger(_col3)
+                    type: int
+                    expr: UDFToInteger(_col4)
+                    type: int
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: dest1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-20_03-03-34_053_4042312084657375262/-mr-10000
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-20_03-03-34_053_4042312084657375262/-mr-10000
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+0	1	00.0	0	3
+1	71	116414.0	10044	115
+2	69	225571.0	15780	111
+3	62	332004.0	20119	99
+4	74	452763.0	30965	124
+5	6	5397.0	278	10
+6	5	6398.0	331	6
+7	6	7735.0	447	10
+8	8	8762.0	595	10
+9	7	91047.0	577	12

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out Wed Oct 27 19:00:02 2010
@@ -67,7 +67,7 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations:
-                expr: count(DISTINCT KEY._col1)
+                expr: count(DISTINCT KEY._col1:0._col0)
                 expr: sum(VALUE._col1)
           bucketGroup: false
           keys:

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out Wed Oct 27 19:00:02 2010
@@ -47,8 +47,8 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations:
-                expr: count(DISTINCT KEY._col1)
-                expr: sum(KEY._col1)
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: sum(KEY._col1:0._col0)
           bucketGroup: false
           keys:
                 expr: KEY._col0

Added: hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out?rev=1028072&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out Wed Oct 27 19:00:02 2010
@@ -0,0 +1,149 @@
+PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dest_g2
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB dest_g2)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: key, value
+              Reduce Output Operator
+                key expressions:
+                      expr: substr(key, 1, 1)
+                      type: string
+                      expr: substr(value, 5)
+                      type: string
+                sort order: ++
+                Map-reduce partition columns:
+                      expr: substr(key, 1, 1)
+                      type: string
+                tag: -1
+                value expressions:
+                      expr: value
+                      type: string
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: sum(KEY._col1:1._col0)
+                expr: sum(DISTINCT KEY._col1:1._col0)
+                expr: count(VALUE._col0)
+          bucketGroup: false
+          keys:
+                expr: KEY._col0
+                type: string
+          mode: complete
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: bigint
+                  expr: concat(_col0, _col2)
+                  type: string
+                  expr: _col3
+                  type: double
+                  expr: _col4
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Select Operator
+              expressions:
+                    expr: _col0
+                    type: string
+                    expr: UDFToInteger(_col1)
+                    type: int
+                    expr: _col2
+                    type: string
+                    expr: UDFToInteger(_col3)
+                    type: int
+                    expr: UDFToInteger(_col4)
+                    type: int
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: dest_g2
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest_g2
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_g2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_g2
+POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c4 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest_g2.* FROM dest_g2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_g2
+PREHOOK: Output: file:/tmp/amarsri/hive_2010-10-20_03-04-17_707_2990757298865083943/-mr-10000
+POSTHOOK: query: SELECT dest_g2.* FROM dest_g2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_g2
+POSTHOOK: Output: file:/tmp/amarsri/hive_2010-10-20_03-04-17_707_2990757298865083943/-mr-10000
+POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c4 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+0	1	00.0	0	3
+1	71	116414.0	10044	115
+2	69	225571.0	15780	111
+3	62	332004.0	20119	99
+4	74	452763.0	30965	124
+5	6	5397.0	278	10
+6	5	6398.0	331	6
+7	6	7735.0	447	10
+8	8	8762.0	595	10
+9	7	91047.0	577	12

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby3.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby3.q.out Wed Oct 27 19:00:02 2010
@@ -62,15 +62,15 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations:
-                expr: sum(KEY._col0)
-                expr: avg(KEY._col0)
-                expr: avg(DISTINCT KEY._col0)
-                expr: max(KEY._col0)
-                expr: min(KEY._col0)
-                expr: std(KEY._col0)
-                expr: stddev_samp(KEY._col0)
-                expr: variance(KEY._col0)
-                expr: var_samp(KEY._col0)
+                expr: sum(KEY._col0:0._col0)
+                expr: avg(KEY._col0:0._col0)
+                expr: avg(DISTINCT KEY._col0:0._col0)
+                expr: max(KEY._col0:0._col0)
+                expr: min(KEY._col0:0._col0)
+                expr: std(KEY._col0:0._col0)
+                expr: stddev_samp(KEY._col0:0._col0)
+                expr: variance(KEY._col0:0._col0)
+                expr: var_samp(KEY._col0:0._col0)
           bucketGroup: false
           mode: partial1
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby3_map.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby3_map.q.out?rev=1028072&r1=1028071&r2=1028072&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby3_map.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby3_map.q.out Wed Oct 27 19:00:02 2010
@@ -96,7 +96,7 @@ STAGE PLANS:
           aggregations:
                 expr: sum(VALUE._col0)
                 expr: avg(VALUE._col1)
-                expr: avg(DISTINCT KEY._col0)
+                expr: avg(DISTINCT KEY._col0:0._col0)
                 expr: max(VALUE._col3)
                 expr: min(VALUE._col4)
                 expr: std(VALUE._col5)