Posted to commits@hive.apache.org by br...@apache.org on 2014/08/24 00:51:41 UTC

svn commit: r1620091 [1/2] - in /hive/branches/spark: itests/src/test/resources/ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ ql/src/test/results/clientpositive/spark/

Author: brock
Date: Sat Aug 23 22:51:40 2014
New Revision: 1620091

URL: http://svn.apache.org/r1620091
Log:
HIVE-7791 - Enable tests on Spark branch (1) [Spark Branch] (Brock reviewed by Szehon)

Added:
    hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/create_merge_compressed.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ctas.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/custom_input_output_format.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
Modified:
    hive/branches/spark/itests/src/test/resources/testconfiguration.properties
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java

Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1620091&r1=1620090&r2=1620091&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Sat Aug 23 22:51:40 2014
@@ -310,6 +310,16 @@ minimr.query.negative.files=cluster_task
   udf_local_resource.q
 
 spark.query.files=spark_test.q \
+   alter_merge_orc.q, \
+   alter_merge_stats_orc.q, \
+   bucket2.q, \
+   bucket3.q, \
+   bucket4.q, \
+   count.q, \
+   create_merge_compressed.q, \
+   ctas.q, \
+   custom_input_output_format.q, \
+   disable_merge_for_bucketing.q, \
    avro_compression_enabled_native.q \
    avro_decimal_native.q \
    join_casesensitive.q \
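
For context, spark.query.files enumerates the .q files the Spark qfile test driver runs; the hunk above adds the ten newly enabled tests to that list. The value uses backslash line continuations, and the new entries are comma-separated while the pre-existing ones are whitespace-separated, so a reader should tolerate both. Below is a minimal sketch (not part of the commit; the file path is taken from the hunk, the class name is hypothetical) of reading the list back with java.util.Properties, which folds the continuations automatically:

// Hypothetical helper, for illustration only: lists the .q files named by
// spark.query.files in the properties file modified above.
import java.io.FileReader;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public class SparkQueryFileLister {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    try (FileReader in = new FileReader(
        "itests/src/test/resources/testconfiguration.properties")) {
      props.load(in); // java.util.Properties joins "\"-continued lines for us
    }
    String raw = props.getProperty("spark.query.files", "");
    // Tolerate the mixed separators visible in the hunk: "foo.q, \" and "bar.q \"
    List<String> qFiles = Arrays.asList(raw.split("[,\\s]+"));
    qFiles.forEach(System.out::println); // e.g. spark_test.q, alter_merge_orc.q, ...
  }
}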

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java?rev=1620091&r1=1620090&r2=1620091&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java Sat Aug 23 22:51:40 2014
@@ -29,6 +29,7 @@ import org.apache.commons.lang.StringUti
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Partitioner;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -224,6 +225,14 @@ public class SparkPlanGenerator {
     JobConf cloned = new JobConf(jobConf);
     // Make sure we'll use a different plan path from the original one
     HiveConf.setVar(cloned, HiveConf.ConfVars.PLAN, "");
+    try {
+      cloned.setPartitionerClass((Class<? extends Partitioner>) (Class.forName(HiveConf.getVar(cloned,
+        HiveConf.ConfVars.HIVEPARTITIONER))));
+    } catch (ClassNotFoundException e) {
+      String msg = "Could not find partitioner class: " + e.getMessage() + " which is specified by: " +
+        HiveConf.ConfVars.HIVEPARTITIONER.varname;
+      throw new IllegalArgumentException(msg, e);
+    }
     if (work instanceof MapWork) {
       List<Path> inputPaths = Utilities.getInputPaths(cloned, (MapWork) work, scratchDir, context, false);
       Utilities.setInputPaths(cloned, inputPaths);
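
The hunk above makes the cloned JobConf honor the partitioner named by HiveConf.ConfVars.HIVEPARTITIONER, loading the class reflectively and failing fast with an IllegalArgumentException if it cannot be found. The following standalone sketch shows the same pattern outside Hive (the class name and method are illustrative, not from the patch); Class.forName(...).asSubclass(Partitioner.class) is used here in place of the patch's unchecked cast, which is a checked variant of the same lookup:

// Standalone sketch of the reflective partitioner lookup added above.
// Assumptions: a plain JobConf and a class-name string stand in for the
// Hive configuration plumbing in SparkPlanGenerator.
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;

public class PartitionerConfigSketch {
  static void configurePartitioner(JobConf conf, String className) {
    try {
      Class<? extends Partitioner> cls =
          Class.forName(className).asSubclass(Partitioner.class);
      conf.setPartitionerClass(cls); // same call the patch makes on the clone
    } catch (ClassNotFoundException e) {
      // Mirrors the patch's failure handling: surface the missing class early.
      throw new IllegalArgumentException(
          "Could not find partitioner class: " + className, e);
    }
  }

  public static void main(String[] args) {
    JobConf conf = new JobConf();
    configurePartitioner(conf, "org.apache.hadoop.mapred.lib.HashPartitioner");
    System.out.println(conf.getPartitionerClass()); // class ...HashPartitioner
  }
}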

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_orc.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_orc.q.out?rev=1620091&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_orc.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_orc.q.out Sat Aug 23 22:51:40 2014
@@ -0,0 +1,269 @@
+PREHOOK: query: create table src_orc_merge_test(key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: create table src_orc_merge_test(key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test
+PREHOOK: query: insert overwrite table src_orc_merge_test select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table src_orc_merge_test select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test
+POSTHOOK: Lineage: src_orc_merge_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test
+POSTHOOK: Lineage: src_orc_merge_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test
+POSTHOOK: Lineage: src_orc_merge_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:7380
+maxFileSize:2460
+minFileSize:2460
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+390273	108631194210
+PREHOOK: query: alter table src_orc_merge_test concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@src_orc_merge_test
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: alter table src_orc_merge_test concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@src_orc_merge_test
+POSTHOOK: Output: default@src_orc_merge_test
+PREHOOK: query: show table extended like `src_orc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:1
+totalFileSize:7059
+maxFileSize:7059
+minFileSize:7059
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+390273	108631194210
+PREHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_part
+PREHOOK: query: alter table src_orc_merge_test_part add partition (ds='2011')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: alter table src_orc_merge_test_part add partition (ds='2011')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+PREHOOK: query: insert overwrite table src_orc_merge_test_part partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table src_orc_merge_test_part partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_part
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:3
+totalFileSize:7380
+maxFileSize:2460
+minFileSize:2460
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+390273	108631194210
+PREHOOK: query: alter table src_orc_merge_test_part partition (ds='2011') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: query: alter table src_orc_merge_test_part partition (ds='2011') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+PREHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_part
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:1
+totalFileSize:7059
+maxFileSize:7059
+minFileSize:7059
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+390273	108631194210
+PREHOOK: query: drop table src_orc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: drop table src_orc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test
+POSTHOOK: Output: default@src_orc_merge_test
+PREHOOK: query: drop table src_orc_merge_test_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: drop table src_orc_merge_test_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part
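
The golden output above exercises ORC file merge on Spark: three inserts leave three files (totalNumberFiles:3, totalFileSize:7380), and alter table ... concatenate collapses them to one (totalNumberFiles:1, totalFileSize:7059) while count(1) and the hash sums are unchanged. A hedged JDBC sketch that replays the unpartitioned half of this flow (the endpoint and table name are assumptions; it needs a running HiveServer2, the hive-jdbc driver on the classpath, and a populated src table as in the test harness):

// Illustrative replay of the merge this golden file verifies; not part of the commit.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class OrcConcatenateSketch {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver"); // needed on older hive-jdbc
    try (Connection conn =
             DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement st = conn.createStatement()) {
      st.execute("create table src_orc_merge_demo(key int, value string) stored as orc");
      for (int i = 0; i < 3; i++) {               // three inserts -> three small files
        st.execute("insert into table src_orc_merge_demo select * from src");
      }
      st.execute("alter table src_orc_merge_demo concatenate"); // 3 files -> 1 file
      try (ResultSet rs = st.executeQuery("select count(1) from src_orc_merge_demo")) {
        rs.next();
        System.out.println(rs.getLong(1));        // still 1500 rows, as above
      }
    }
  }
}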

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out?rev=1620091&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out Sat Aug 23 22:51:40 2014
@@ -0,0 +1,388 @@
+PREHOOK: query: create table src_orc_merge_test_stat(key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: create table src_orc_merge_test_stat(key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: insert overwrite table src_orc_merge_test_stat select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_stat
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table src_orc_merge_test_stat select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: Lineage: src_orc_merge_test_stat.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_stat.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_stat select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_stat
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test_stat select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: Lineage: src_orc_merge_test_stat.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_stat.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_stat select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_stat
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test_stat select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: Lineage: src_orc_merge_test_stat.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_stat.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test_stat`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_stat`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_stat
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:7380
+maxFileSize:2460
+minFileSize:2460
+#### A masked pattern was here ####
+
+PREHOOK: query: desc extended src_orc_merge_test_stat
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: query: desc extended src_orc_merge_test_stat
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: desc formatted  src_orc_merge_test_stat
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: query: desc formatted  src_orc_merge_test_stat
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	3                   
+	numRows             	1500                
+	rawDataSize         	141000              
+	totalSize           	7380                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table src_orc_merge_test_stat concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@src_orc_merge_test_stat
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: alter table src_orc_merge_test_stat concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: desc formatted src_orc_merge_test_stat
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_stat
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	1                   
+	numRows             	1500                
+	rawDataSize         	141000              
+	totalSize           	7059                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: create table src_orc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: query: create table src_orc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+PREHOOK: query: alter table src_orc_merge_test_part_stat add partition (ds='2011')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: query: alter table src_orc_merge_test_part_stat add partition (ds='2011')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: insert overwrite table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test_part_stat` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_part_stat` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_part_stat
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:3
+totalFileSize:7380
+maxFileSize:2460
+minFileSize:2460
+#### A masked pattern was here ####
+
+PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2011]              	 
+Database:           	default             	 
+Table:              	src_orc_merge_test_part_stat	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	3                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
+	totalSize           	7380                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2011]              	 
+Database:           	default             	 
+Table:              	src_orc_merge_test_part_stat	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	3                   
+	numRows             	1500                
+	rawDataSize         	141000              
+	totalSize           	7380                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table src_orc_merge_test_part_stat partition (ds='2011') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: alter table src_orc_merge_test_part_stat partition (ds='2011') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2011]              	 
+Database:           	default             	 
+Table:              	src_orc_merge_test_part_stat	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	1                   
+	numRows             	1500                
+	rawDataSize         	141000              
+	totalSize           	7059                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table src_orc_merge_test_stat
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: drop table src_orc_merge_test_stat
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: drop table src_orc_merge_test_part_stat
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: query: drop table src_orc_merge_test_part_stat
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out?rev=1620091&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out Sat Aug 23 22:51:40 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out?rev=1620091&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out Sat Aug 23 22:51:40 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out?rev=1620091&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out Sat Aug 23 22:51:40 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out?rev=1620091&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out Sat Aug 23 22:51:40 2014
@@ -0,0 +1,304 @@
+PREHOOK: query: create table abcd (a int, b int, c int, d int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@abcd
+POSTHOOK: query: create table abcd (a int, b int, c int, d int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@abcd
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@abcd
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@abcd
+PREHOOK: query: select * from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select * from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+NULL	35	23	6
+10	1000	50	1
+100	100	10	3
+12	NULL	80	2
+10	100	NULL	5
+10	100	45	4
+12	100	75	7
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+                    outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
+                      keys: a (type: int), b (type: int), c (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                        sort order: +++
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col5 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col2)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+NULL	1	1	6
+10	2	2	10
+12	1	2	9
+100	1	1	3
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+                    outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1), count(), count(a), count(b), count(c), count(d), count(DISTINCT a), count(DISTINCT b), count(DISTINCT c), count(DISTINCT d), count(DISTINCT a, b), count(DISTINCT b, c), count(DISTINCT c, d), count(DISTINCT a, d), count(DISTINCT a, c), count(DISTINCT b, d), count(DISTINCT a, b, c), count(DISTINCT b, c, d), count(DISTINCT a, c, d), count(DISTINCT a, b, d), count(DISTINCT a, b, c, d)
+                      keys: a (type: int), b (type: int), c (type: int), d (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: int)
+                        sort order: ++++
+                        Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0), count(VALUE._col1), count(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                  Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+7	7	6	6	6	7	3	3	6	7	4	5	6	6	5	6	4	5	5	5	4
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+                    outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: a (type: int), b (type: int), c (type: int)
+                      sort order: +++
+                      Map-reduce partition columns: a (type: int)
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: d (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: complete
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+NULL	1	1	6
+10	2	2	10
+12	1	2	9
+100	1	1	3
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+                    outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+                      sort order: ++++
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(1), count(), count(KEY._col0:0._col0), count(KEY._col0:1._col0), count(KEY._col0:2._col0), count(KEY._col0:3._col0), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
+                mode: complete
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+7	7	6	6	6	7	3	3	6	7	4	5	6	6	5	6	4	5	5	5	4
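
Editorial note on the count.q plans above, not part of the golden file: with a GROUP BY key, rows are partitioned on a but sorted on a, b, c, so each reducer can stream through its group and evaluate every count(DISTINCT ...) in one pass; the second query has no grouping key, so all rows funnel into a single reducer. As a sanity check, a multi-column distinct count can be rewritten without DISTINCT. A hypothetical HiveQL sketch (count(DISTINCT a, b) skips rows where either argument is NULL, which the inner filter mirrors):

    -- should match count(distinct a, b), i.e. 4 on this test data
    select count(*)
    from (
      select a, b
      from abcd
      where a is not null and b is not null
      group by a, b
    ) t;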

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/create_merge_compressed.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/create_merge_compressed.q.out?rev=1620091&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/create_merge_compressed.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/create_merge_compressed.q.out Sat Aug 23 22:51:40 2014
@@ -0,0 +1,140 @@
+PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tgt_rc_merge_test
+PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show table extended like `tgt_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:tgt_rc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:2
+totalFileSize:342
+maxFileSize:171
+minFileSize:171
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+10
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+46	-751895388
+PREHOOK: query: alter table tgt_rc_merge_test concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: alter table tgt_rc_merge_test concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
+PREHOOK: query: show table extended like `tgt_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:tgt_rc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:1
+totalFileSize:243
+maxFileSize:243
+minFileSize:243
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+10
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+46	-751895388
+PREHOOK: query: drop table src_rc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: drop table src_rc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: drop table tgt_rc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: drop table tgt_rc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
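
Editorial note, not part of the golden file: the output above exercises RCFile merging end to end. Two identical inserts leave tgt_rc_merge_test with totalNumberFiles:2 and totalFileSize:342; alter table ... concatenate then compacts them into a single 243-byte file while count(1) and both hash checksums are unchanged. A minimal sketch of the driving script, reconstructed from the PREHOOK query lines above:

    create table src_rc_merge_test(key int, value string) stored as rcfile;
    load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;

    create table tgt_rc_merge_test(key int, value string) stored as rcfile;
    insert into table tgt_rc_merge_test select * from src_rc_merge_test;  -- first file
    insert into table tgt_rc_merge_test select * from src_rc_merge_test;  -- second file

    show table extended like `tgt_rc_merge_test`;   -- expect totalNumberFiles:2
    alter table tgt_rc_merge_test concatenate;      -- block-level merge of the RCFiles
    show table extended like `tgt_rc_merge_test`;   -- expect totalNumberFiles:1

    -- row count and checksums must be identical before and after the merge
    select count(1) from tgt_rc_merge_test;
    select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;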