Posted to commits@hive.apache.org by pr...@apache.org on 2015/11/20 08:24:40 UTC

[2/2] hive git commit: HIVE-12450: OrcFileMergeOperator does not use correct compression buffer size (Prasanth Jayachandran reviewed by Sergey Shelukhin)

HIVE-12450: OrcFileMergeOperator does not use correct compression buffer size (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/97cb0c6e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/97cb0c6e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/97cb0c6e

Branch: refs/heads/master
Commit: 97cb0c6e021ae4a1556fd4ef0760b636530e84b4
Parents: dbb54b9
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Nov 20 01:24:25 2015 -0600
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Nov 20 01:24:25 2015 -0600

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   2 +
 .../hive/ql/exec/OrcFileMergeOperator.java      |  20 +-
 .../hive/ql/io/orc/OrcFileKeyWrapper.java       |   6 +-
 .../test/queries/clientpositive/orc_merge10.q   | 110 +++
 .../test/queries/clientpositive/orc_merge11.q   |  43 ++
 .../results/clientpositive/orc_merge10.q.out    | 644 +++++++++++++++++
 .../results/clientpositive/orc_merge11.q.out    | 316 +++++++++
 .../clientpositive/tez/orc_merge10.q.out        | 709 +++++++++++++++++++
 .../clientpositive/tez/orc_merge11.q.out        | 316 +++++++++
 9 files changed, 2155 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index a33e720..b86dd19 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -155,6 +155,8 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   orc_merge7.q,\
   orc_merge8.q,\
   orc_merge9.q,\
+  orc_merge10.q,\
+  orc_merge11.q,\
   orc_merge_incompat1.q,\
   orc_merge_incompat2.q,\
   orc_vectorization_ppd.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index 2ea6154..99a3e8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -46,7 +46,7 @@ public class OrcFileMergeOperator extends
   // does not merge, the file will be put into incompatible file set and will
   // not be merged.
   CompressionKind compression = null;
-  long compressBuffSize = 0;
+  int compressBuffSize = 0;
   OrcFile.Version version;
   int columnCount = 0;
   int rowIndexStride = 0;
@@ -104,13 +104,17 @@ public class OrcFileMergeOperator extends
         columnCount = k.getTypes().get(0).getSubtypesCount();
         rowIndexStride = k.getRowIndexStride();
 
-        // block size and stripe size will be from config
-        outWriter = OrcFile.createWriter(outPath,
-            OrcFile.writerOptions(jc)
-                .compress(compression)
-                .version(version)
-                .rowIndexStride(rowIndexStride)
-                .inspector(reader.getObjectInspector()));
+        OrcFile.WriterOptions options = OrcFile.writerOptions(jc)
+            .compress(compression)
+            .version(version)
+            .rowIndexStride(rowIndexStride)
+            .inspector(reader.getObjectInspector());
+        // compression buffer size should only be set if compression is enabled
+        if (compression != CompressionKind.NONE) {
+          options.bufferSize(compressBuffSize);
+        }
+
+        outWriter = OrcFile.createWriter(outPath, options);
         if (isLogDebugEnabled) {
           LOG.info("ORC merge file output path: " + outPath);
         }
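
For readers skimming the hunk above: the fix derives the merged file's writer options from the first input file's footer rather than from global defaults, and skips bufferSize() entirely when compression is NONE. A minimal, hypothetical sketch of that same pattern as a standalone helper, not the committed operator code; it omits the version handling and assumes the Reader of the first input file is at hand:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.orc.CompressionKind;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile;
    import org.apache.hadoop.hive.ql.io.orc.Reader;

    // Hypothetical helper mirroring the committed logic: copy compression
    // kind, row index stride, and schema from the first input file, and set
    // the compression buffer size only when compression is enabled.
    final class MergeWriterOptionsSketch {
      static OrcFile.WriterOptions fromFirstInput(Configuration conf, Reader first) {
        OrcFile.WriterOptions opts = OrcFile.writerOptions(conf)
            .compress(first.getCompression())
            .rowIndexStride(first.getRowIndexStride())
            .inspector(first.getObjectInspector());
        if (first.getCompression() != CompressionKind.NONE) {
          // getCompressionSize() returns int, which is why the operator's
          // field is narrowed from long to int in this commit.
          opts.bufferSize(first.getCompressionSize());
        }
        return opts;
      }
    }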

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
index a62fc1e..8eda37f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
@@ -34,7 +34,7 @@ public class OrcFileKeyWrapper implements WritableComparable<OrcFileKeyWrapper>
 
   private Path inputPath;
   private CompressionKind compression;
-  private long compressBufferSize;
+  private int compressBufferSize;
   private List<OrcProto.Type> types;
   private int rowIndexStride;
   private OrcFile.Version version;
@@ -64,11 +64,11 @@ public class OrcFileKeyWrapper implements WritableComparable<OrcFileKeyWrapper>
     this.rowIndexStride = rowIndexStride;
   }
 
-  public long getCompressBufferSize() {
+  public int getCompressBufferSize() {
     return compressBufferSize;
   }
 
-  public void setCompressBufferSize(long compressBufferSize) {
+  public void setCompressBufferSize(int compressBufferSize) {
     this.compressBufferSize = compressBufferSize;
   }
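
The long-to-int narrowing above matters because Reader.getCompressionSize() returns an int; carrying the value as long invited a lossy or defaulted conversion on the writer side. A hedged sketch of how such a key could be populated from a file footer, with the value now flowing int-to-int; this is illustrative, not the input format's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile;
    import org.apache.hadoop.hive.ql.io.orc.OrcFileKeyWrapper;
    import org.apache.hadoop.hive.ql.io.orc.Reader;

    // Illustrative only: the buffer size flows from the file footer into
    // the merge key with no narrowing cast on either side.
    final class KeyPopulationSketch {
      static OrcFileKeyWrapper keyFor(Configuration conf, Path path) throws java.io.IOException {
        Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
        OrcFileKeyWrapper key = new OrcFileKeyWrapper();
        key.setCompressBufferSize(reader.getCompressionSize()); // int -> int
        key.setRowIndexStride(reader.getRowIndexStride());
        return key;
      }
    }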
 

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/test/queries/clientpositive/orc_merge10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge10.q b/ql/src/test/queries/clientpositive/orc_merge10.q
new file mode 100644
index 0000000..7f81947
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_merge10.q
@@ -0,0 +1,110 @@
+set hive.explain.user=false;
+set hive.merge.orcfile.stripe.level=false;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.optimize.sort.dynamic.partition=false;
+set mapred.min.split.size=1000;
+set mapred.max.split.size=2000;
+set tez.am.grouping.split-count=2;
+set tez.grouping.split-count=2;
+set hive.merge.tezfiles=false;
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+-- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1;
+DROP TABLE orcfile_merge1b;
+DROP TABLE orcfile_merge1c;
+
+CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096");
+CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096");
+CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096");
+
+-- merge disabled
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src;
+
+INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
+
+set hive.merge.tezfiles=true;
+set hive.merge.mapfiles=true;
+set hive.merge.mapredfiles=true;
+-- auto-merge slow way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src;
+
+INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/;
+
+set hive.merge.orcfile.stripe.level=true;
+-- auto-merge fast way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src;
+
+INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/;
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+-- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t;
+
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1b WHERE ds='1'
+) t;
+
+select count(*) from orcfile_merge1;
+select count(*) from orcfile_merge1b;
+
+set tez.am.grouping.split-count=1;
+set tez.grouping.split-count=1;
+-- concatenate
+explain ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE;
+ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
+
+-- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1c WHERE ds='1'
+) t;
+
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t;
+
+select count(*) from orcfile_merge1;
+select count(*) from orcfile_merge1c;
+
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
+select * from orcfile_merge1 where ds='1' and part='0' limit 1;
+select * from orcfile_merge1c where ds='1' and part='0' limit 1;
+
+DROP TABLE orcfile_merge1;
+DROP TABLE orcfile_merge1b;
+DROP TABLE orcfile_merge1c;
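
The PostExecOrcFileDump hook set near the end of this test prints each file's footer, which is how the golden output below asserts "Compression: SNAPPY" and "Compression size: 4096" on the merged file. The same check could be done programmatically; a hypothetical sketch, where the merged file path is a placeholder argument:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.orc.CompressionKind;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile;
    import org.apache.hadoop.hive.ql.io.orc.Reader;

    // Hypothetical post-merge assertion: the merged file must retain the
    // table's orc.compress.size=4096 and SNAPPY codec, not config defaults.
    public class VerifyMergedFooter {
      public static void main(String[] args) throws Exception {
        Path merged = new Path(args[0]); // e.g. a file under orcfile_merge1/ds=1/part=0/
        Reader r = OrcFile.createReader(merged, OrcFile.readerOptions(new Configuration()));
        if (r.getCompression() != CompressionKind.SNAPPY || r.getCompressionSize() != 4096) {
          throw new AssertionError("merge lost compression settings: "
              + r.getCompression() + "/" + r.getCompressionSize());
        }
      }
    }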

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/test/queries/clientpositive/orc_merge11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge11.q b/ql/src/test/queries/clientpositive/orc_merge11.q
new file mode 100644
index 0000000..91f1991
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_merge11.q
@@ -0,0 +1,43 @@
+DROP TABLE orcfile_merge1;
+DROP TABLE orc_split_elim;
+
+create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
+
+create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
+
+insert overwrite table orcfile_merge1 select * from orc_split_elim;
+insert into table orcfile_merge1 select * from orc_split_elim;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/;
+
+set hive.merge.tezfiles=true;
+set hive.merge.mapfiles=true;
+set hive.merge.mapredfiles=true;
+set hive.merge.orcfile.stripe.level=true;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+set tez.am.grouping.split-count=1;
+set tez.grouping.split-count=1;
+set hive.exec.orc.default.buffer.size=120;
+
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
+select * from orcfile_merge1 limit 1;
+SET hive.exec.post.hooks=;
+
+-- concatenate
+ALTER TABLE  orcfile_merge1 CONCATENATE;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/;
+
+select count(*) from orc_split_elim;
+-- will have double the number of rows
+select count(*) from orcfile_merge1;
+
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
+select * from orcfile_merge1 limit 1;
+SET hive.exec.post.hooks=;
+
+DROP TABLE orc_split_elim;
+DROP TABLE orcfile_merge1;
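
Note the hive.exec.orc.default.buffer.size=120 setting above: it is deliberately tiny so that, before this fix, the stripe-level merge would visibly fall back to the session default instead of the 4096 bytes recorded in the input footers. A hedged illustration of that contrast, assuming writerOptions() seeds its buffer size from this config property, which is what the test relies on:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile;

    public class BufferSizeDefaultSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("hive.exec.orc.default.buffer.size", 120);
        // Without an explicit bufferSize() the writer inherits the session
        // default (120 here); the fixed merge path passes 4096 taken from
        // the input files' footers instead.
        OrcFile.WriterOptions fromDefaults = OrcFile.writerOptions(conf);
        OrcFile.WriterOptions fromFooters = OrcFile.writerOptions(conf).bufferSize(4096);
      }
    }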

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/test/results/clientpositive/orc_merge10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge10.q.out b/ql/src/test/results/clientpositive/orc_merge10.q.out
new file mode 100644
index 0000000..89d0bf6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/orc_merge10.q.out
@@ -0,0 +1,644 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1b
+POSTHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1c
+POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1c
+PREHOOK: query: -- merge disabled
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- merge disabled
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                    name: default.orcfile_merge1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge slow way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge slow way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                    name: default.orcfile_merge1b
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1b
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                  serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                  name: default.orcfile_merge1b
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                  serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                  name: default.orcfile_merge1b
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1b@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge fast way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge fast way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                    name: default.orcfile_merge1c
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1c
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          ORC File Merge Operator
+      merge level: stripe
+      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          ORC File Merge Operator
+      merge level: stripe
+      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1c@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1b WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1b WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: -- concatenate
+explain ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: -- concatenate
+explain ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-0
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 0
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: query: ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1c WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1c WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select * from orcfile_merge1 where ds='1' and part='0' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+#### A masked pattern was here ####
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 242
+Compression: SNAPPY
+Compression size: 4096
+Type: struct<key:int,value:string>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 152 hasNull: false
+    Column 1: count: 152 hasNull: false min: 0 max: 497 sum: 38034
+    Column 2: count: 152 hasNull: false min: val_0 max: val_97 sum: 1034
+  Stripe 2:
+    Column 0: count: 90 hasNull: false
+    Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736
+    Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612
+
+File Statistics:
+  Column 0: count: 242 hasNull: false
+  Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770
+  Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646
+
+Stripes:
+  Stripe: offset: 3 data: 988 rows: 152 tail: 72 index: 77
+    Stream: column 0 section ROW_INDEX start: 3 length 12
+    Stream: column 1 section ROW_INDEX start: 15 length 28
+    Stream: column 2 section ROW_INDEX start: 43 length 37
+    Stream: column 1 section DATA start: 80 length 309
+    Stream: column 2 section DATA start: 389 length 157
+    Stream: column 2 section LENGTH start: 546 length 60
+    Stream: column 2 section DICTIONARY_DATA start: 606 length 462
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DICTIONARY_V2[114]
+    Row group indices for column 1:
+      Entry 0: count: 152 hasNull: false min: 0 max: 497 sum: 38034 positions: 0,0,0
+  Stripe: offset: 1140 data: 616 rows: 90 tail: 61 index: 76
+    Stream: column 0 section ROW_INDEX start: 1140 length 11
+    Stream: column 1 section ROW_INDEX start: 1151 length 27
+    Stream: column 2 section ROW_INDEX start: 1178 length 38
+    Stream: column 1 section DATA start: 1216 length 185
+    Stream: column 2 section DATA start: 1401 length 377
+    Stream: column 2 section LENGTH start: 1778 length 54
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+
+File length: 2137 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+172	val_172	1	0
+PREHOOK: query: select * from orcfile_merge1c where ds='1' and part='0' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+#### A masked pattern was here ####
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 242
+Compression: SNAPPY
+Compression size: 4096
+Type: struct<key:int,value:string>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 152 hasNull: false
+    Column 1: count: 152 hasNull: false min: 0 max: 497 sum: 38034
+    Column 2: count: 152 hasNull: false min: val_0 max: val_97 sum: 1034
+  Stripe 2:
+    Column 0: count: 90 hasNull: false
+    Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736
+    Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612
+
+File Statistics:
+  Column 0: count: 242 hasNull: false
+  Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770
+  Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646
+
+Stripes:
+  Stripe: offset: 3 data: 988 rows: 152 tail: 72 index: 77
+    Stream: column 0 section ROW_INDEX start: 3 length 12
+    Stream: column 1 section ROW_INDEX start: 15 length 28
+    Stream: column 2 section ROW_INDEX start: 43 length 37
+    Stream: column 1 section DATA start: 80 length 309
+    Stream: column 2 section DATA start: 389 length 157
+    Stream: column 2 section LENGTH start: 546 length 60
+    Stream: column 2 section DICTIONARY_DATA start: 606 length 462
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DICTIONARY_V2[114]
+    Row group indices for column 1:
+      Entry 0: count: 152 hasNull: false min: 0 max: 497 sum: 38034 positions: 0,0,0
+  Stripe: offset: 1140 data: 616 rows: 90 tail: 61 index: 76
+    Stream: column 0 section ROW_INDEX start: 1140 length 11
+    Stream: column 1 section ROW_INDEX start: 1151 length 27
+    Stream: column 2 section ROW_INDEX start: 1178 length 38
+    Stream: column 1 section DATA start: 1216 length 185
+    Stream: column 2 section DATA start: 1401 length 377
+    Stream: column 2 section LENGTH start: 1778 length 54
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+
+File length: 2137 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+172	val_172	1	0
+PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Output: default@orcfile_merge1c

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/test/results/clientpositive/orc_merge11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out b/ql/src/test/results/clientpositive/orc_merge11.q.out
new file mode 100644
index 0000000..da608db
--- /dev/null
+++ b/ql/src/test/results/clientpositive/orc_merge11.q.out
@@ -0,0 +1,316 @@
+PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_split_elim
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_split_elim
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_split_elim
+POSTHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_split_elim
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_split_elim
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_split_elim
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_split_elim
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_split_elim
+PREHOOK: query: create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: insert overwrite table orcfile_merge1 select * from orc_split_elim
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: insert overwrite table orcfile_merge1 select * from orc_split_elim
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+POSTHOOK: Output: default@orcfile_merge1
+POSTHOOK: Lineage: orcfile_merge1.decimal1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.string1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.subtype SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.ts SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.userid SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orcfile_merge1 select * from orc_split_elim
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: insert into table orcfile_merge1 select * from orc_split_elim
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+POSTHOOK: Output: default@orcfile_merge1
+POSTHOOK: Lineage: orcfile_merge1.decimal1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.string1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.subtype SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.ts SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1.userid SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:userid, type:bigint, comment:null), ]
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orcfile_merge1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+#### A masked pattern was here ####
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 50000
+Compression: ZLIB
+Compression size: 4096
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 50000 hasNull: false
+    Column 1: count: 50000 hasNull: false min: 2 max: 100 sum: 4999238
+    Column 2: count: 50000 hasNull: false min: bar max: zebra sum: 249980
+    Column 3: count: 50000 hasNull: false min: 0.8 max: 80.0 sum: 400102.80000000005
+    Column 4: count: 50000 hasNull: false min: 0 max: 6 sum: 32
+    Column 5: count: 50000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+
+File Statistics:
+  Column 0: count: 50000 hasNull: false
+  Column 1: count: 50000 hasNull: false min: 2 max: 100 sum: 4999238
+  Column 2: count: 50000 hasNull: false min: bar max: zebra sum: 249980
+  Column 3: count: 50000 hasNull: false min: 0.8 max: 80.0 sum: 400102.80000000005
+  Column 4: count: 50000 hasNull: false min: 0 max: 6 sum: 32
+  Column 5: count: 50000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+
+Stripes:
+  Stripe: offset: 3 data: 10104 rows: 50000 tail: 117 index: 509
+    Stream: column 0 section ROW_INDEX start: 3 length 17
+    Stream: column 1 section ROW_INDEX start: 20 length 85
+    Stream: column 2 section ROW_INDEX start: 105 length 87
+    Stream: column 3 section ROW_INDEX start: 192 length 111
+    Stream: column 4 section ROW_INDEX start: 303 length 108
+    Stream: column 5 section ROW_INDEX start: 411 length 101
+    Stream: column 1 section DATA start: 512 length 871
+    Stream: column 2 section DATA start: 1383 length 362
+    Stream: column 2 section LENGTH start: 1745 length 8
+    Stream: column 2 section DICTIONARY_DATA start: 1753 length 23
+    Stream: column 3 section DATA start: 1776 length 5167
+    Stream: column 4 section DATA start: 6943 length 524
+    Stream: column 4 section SECONDARY start: 7467 length 118
+    Stream: column 5 section DATA start: 7585 length 2913
+    Stream: column 5 section SECONDARY start: 10498 length 118
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DICTIONARY_V2[6]
+    Encoding column 3: DIRECT
+    Encoding column 4: DIRECT_V2
+    Encoding column 5: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 133,1071,391
+      Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 292,2147,391
+      Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 453,3223,391
+      Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 683,203,391
+
+File length: 11071 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 50000
+Compression: ZLIB
+Compression size: 4096
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 50000 hasNull: false
+    Column 1: count: 50000 hasNull: false min: 2 max: 100 sum: 4999238
+    Column 2: count: 50000 hasNull: false min: bar max: zebra sum: 249980
+    Column 3: count: 50000 hasNull: false min: 0.8 max: 80.0 sum: 400102.80000000005
+    Column 4: count: 50000 hasNull: false min: 0 max: 6 sum: 32
+    Column 5: count: 50000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+
+File Statistics:
+  Column 0: count: 50000 hasNull: false
+  Column 1: count: 50000 hasNull: false min: 2 max: 100 sum: 4999238
+  Column 2: count: 50000 hasNull: false min: bar max: zebra sum: 249980
+  Column 3: count: 50000 hasNull: false min: 0.8 max: 80.0 sum: 400102.80000000005
+  Column 4: count: 50000 hasNull: false min: 0 max: 6 sum: 32
+  Column 5: count: 50000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+
+Stripes:
+  Stripe: offset: 3 data: 10104 rows: 50000 tail: 117 index: 509
+    Stream: column 0 section ROW_INDEX start: 3 length 17
+    Stream: column 1 section ROW_INDEX start: 20 length 85
+    Stream: column 2 section ROW_INDEX start: 105 length 87
+    Stream: column 3 section ROW_INDEX start: 192 length 111
+    Stream: column 4 section ROW_INDEX start: 303 length 108
+    Stream: column 5 section ROW_INDEX start: 411 length 101
+    Stream: column 1 section DATA start: 512 length 871
+    Stream: column 2 section DATA start: 1383 length 362
+    Stream: column 2 section LENGTH start: 1745 length 8
+    Stream: column 2 section DICTIONARY_DATA start: 1753 length 23
+    Stream: column 3 section DATA start: 1776 length 5167
+    Stream: column 4 section DATA start: 6943 length 524
+    Stream: column 4 section SECONDARY start: 7467 length 118
+    Stream: column 5 section DATA start: 7585 length 2913
+    Stream: column 5 section SECONDARY start: 10498 length 118
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DICTIONARY_V2[6]
+    Encoding column 3: DIRECT
+    Encoding column 4: DIRECT_V2
+    Encoding column 5: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 133,1071,391
+      Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 292,2147,391
+      Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 453,3223,391
+      Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 683,203,391
+
+File length: 11071 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- concatenate
+ALTER TABLE  orcfile_merge1 CONCATENATE
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from orc_split_elim
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+50000
+PREHOOK: query: -- will have double the number of rows
+select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+#### A masked pattern was here ####
+100000
+PREHOOK: query: select * from orcfile_merge1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+#### A masked pattern was here ####
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 100000
+Compression: ZLIB
+Compression size: 4096
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 50000 hasNull: false
+    Column 1: count: 50000 hasNull: false min: 2 max: 100 sum: 4999238
+    Column 2: count: 50000 hasNull: false min: bar max: zebra sum: 249980
+    Column 3: count: 50000 hasNull: false min: 0.8 max: 80.0 sum: 400102.80000000005
+    Column 4: count: 50000 hasNull: false min: 0 max: 6 sum: 32
+    Column 5: count: 50000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+  Stripe 2:
+    Column 0: count: 50000 hasNull: false
+    Column 1: count: 50000 hasNull: false min: 2 max: 100 sum: 4999238
+    Column 2: count: 50000 hasNull: false min: bar max: zebra sum: 249980
+    Column 3: count: 50000 hasNull: false min: 0.8 max: 80.0 sum: 400102.80000000005
+    Column 4: count: 50000 hasNull: false min: 0 max: 6 sum: 32
+    Column 5: count: 50000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+
+File Statistics:
+  Column 0: count: 100000 hasNull: false
+  Column 1: count: 100000 hasNull: false min: 2 max: 100 sum: 9998476
+  Column 2: count: 100000 hasNull: false min: bar max: zebra sum: 499960
+  Column 3: count: 100000 hasNull: false min: 0.8 max: 80.0 sum: 800205.6000000001
+  Column 4: count: 100000 hasNull: false min: 0 max: 6 sum: 64
+  Column 5: count: 100000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0
+
+Stripes:
+  Stripe: offset: 3 data: 10104 rows: 50000 tail: 117 index: 509
+    Stream: column 0 section ROW_INDEX start: 3 length 17
+    Stream: column 1 section ROW_INDEX start: 20 length 85
+    Stream: column 2 section ROW_INDEX start: 105 length 87
+    Stream: column 3 section ROW_INDEX start: 192 length 111
+    Stream: column 4 section ROW_INDEX start: 303 length 108
+    Stream: column 5 section ROW_INDEX start: 411 length 101
+    Stream: column 1 section DATA start: 512 length 871
+    Stream: column 2 section DATA start: 1383 length 362
+    Stream: column 2 section LENGTH start: 1745 length 8
+    Stream: column 2 section DICTIONARY_DATA start: 1753 length 23
+    Stream: column 3 section DATA start: 1776 length 5167
+    Stream: column 4 section DATA start: 6943 length 524
+    Stream: column 4 section SECONDARY start: 7467 length 118
+    Stream: column 5 section DATA start: 7585 length 2913
+    Stream: column 5 section SECONDARY start: 10498 length 118
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DICTIONARY_V2[6]
+    Encoding column 3: DIRECT
+    Encoding column 4: DIRECT_V2
+    Encoding column 5: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 133,1071,391
+      Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 292,2147,391
+      Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 453,3223,391
+      Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 683,203,391
+  Stripe: offset: 10733 data: 10104 rows: 50000 tail: 117 index: 509
+    Stream: column 0 section ROW_INDEX start: 10733 length 17
+    Stream: column 1 section ROW_INDEX start: 10750 length 85
+    Stream: column 2 section ROW_INDEX start: 10835 length 87
+    Stream: column 3 section ROW_INDEX start: 10922 length 111
+    Stream: column 4 section ROW_INDEX start: 11033 length 108
+    Stream: column 5 section ROW_INDEX start: 11141 length 101
+    Stream: column 1 section DATA start: 11242 length 871
+    Stream: column 2 section DATA start: 12113 length 362
+    Stream: column 2 section LENGTH start: 12475 length 8
+    Stream: column 2 section DICTIONARY_DATA start: 12483 length 23
+    Stream: column 3 section DATA start: 12506 length 5167
+    Stream: column 4 section DATA start: 17673 length 524
+    Stream: column 4 section SECONDARY start: 18197 length 118
+    Stream: column 5 section DATA start: 18315 length 2913
+    Stream: column 5 section SECONDARY start: 21228 length 118
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DICTIONARY_V2[6]
+    Encoding column 3: DIRECT
+    Encoding column 4: DIRECT_V2
+    Encoding column 5: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 133,1071,391
+      Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 292,2147,391
+      Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 453,3223,391
+      Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 683,203,391
+
+File length: 21814 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: DROP TABLE orc_split_elim
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_split_elim
+PREHOOK: Output: default@orc_split_elim
+PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1

http://git-wip-us.apache.org/repos/asf/hive/blob/97cb0c6e/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge10.q.out b/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
new file mode 100644
index 0000000..d7ea13a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
@@ -0,0 +1,709 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1b
+POSTHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1c
+POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties("orc.compress"="SNAPPY","orc.compress.size"="4096")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1c
+PREHOOK: query: -- merge disabled
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- merge disabled
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                          name: default.orcfile_merge1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
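
[Note: this plan has no conditional merge stages: Stage-1 writes directly and Stage-0 moves the data into place, so each partition keeps however many files the mappers produced ("Found 3 items" below). A hedged sketch of settings that would produce this plan shape, assuming the q file disables merging for this baseline insert (the q file is not part of this hunk):

    -- assumption: file merging switched off for the "merge disabled" case
    set hive.merge.tezfiles=false;
    set hive.merge.mapfiles=false;
    set hive.merge.mapredfiles=false;
]
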
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge slow way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge slow way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                          name: default.orcfile_merge1b
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1b
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+  Stage: Stage-4
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        File Merge 
+            Map Operator Tree:
+                TableScan
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                        serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                        name: default.orcfile_merge1b
+
+  Stage: Stage-6
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        File Merge 
+            Map Operator Tree:
+                TableScan
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                        serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                        name: default.orcfile_merge1b
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
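
[Note: in this "slow way" plan, the conditional Stage-8 chooses between a plain move (Stage-5) and merge jobs (Stage-4/Stage-6) whose File Merge vertices contain a full TableScan plus File Output Operator: rows are decoded and re-encoded, so writer settings are re-derived from the table definition. A hedged guess at the settings behind it (not visible in this hunk):

    -- assumption: merging on, stripe-level fast path off
    set hive.merge.tezfiles=true;
    set hive.merge.orcfile.stripe.level=false;
]
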
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1b@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge fast way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge fast way
+EXPLAIN
+    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+        SELECT key, value, PMOD(HASH(key), 2) as part
+        FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                          name: default.orcfile_merge1c
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1c
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+  Stage: Stage-4
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        File Merge 
+          Merge File Operator
+            Map Operator Tree:
+                ORC File Merge Operator
+            merge level: stripe
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+  Stage: Stage-6
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        File Merge 
+          Merge File Operator
+            Map Operator Tree:
+                ORC File Merge Operator
+            merge level: stripe
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
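
[Note: here the File Merge vertices instead use a Merge File Operator with "merge level: stripe": existing stripes are copied without decoding rows. This is exactly the path where HIVE-12450 matters, since the merged file must inherit the source files' compression buffer size rather than a size taken from config defaults. A hedged guess at the corresponding settings:

    -- assumption: stripe-level fast merge enabled
    set hive.merge.tezfiles=true;
    set hive.merge.orcfile.stripe.level=true;
]
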
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1c@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    SELECT key, value, PMOD(HASH(key), 2) as part
+    FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1b WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1b WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: -- concatenate
+explain ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: -- concatenate
+explain ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-0
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          partition:
+            ds 1
+            part 0
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.orcfile_merge1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
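
[Note: CONCATENATE exercises the same stripe-level merge path, but as explicit DDL against one partition (Stage-0 above is the merge task). For reference, the statement also works on an unpartitioned ORC table; some_orc_table below is a hypothetical name, not one of the tables in this test:

    -- hypothetical table name, for illustration only
    ALTER TABLE some_orc_table CONCATENATE;
]
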
+PREHOOK: query: ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: query: ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1c WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1c WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+    FROM orcfile_merge1 WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select * from orcfile_merge1 where ds='1' and part='0' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+#### A masked pattern was here ####
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 242
+Compression: SNAPPY
+Compression size: 4096
+Type: struct<key:int,value:string>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 90 hasNull: false
+    Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736
+    Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612
+  Stripe 2:
+    Column 0: count: 78 hasNull: false
+    Column 1: count: 78 hasNull: false min: 0 max: 497 sum: 18371
+    Column 2: count: 78 hasNull: false min: val_0 max: val_95 sum: 529
+  Stripe 3:
+    Column 0: count: 74 hasNull: false
+    Column 1: count: 74 hasNull: false min: 2 max: 493 sum: 19663
+    Column 2: count: 74 hasNull: false min: val_105 max: val_97 sum: 505
+
+File Statistics:
+  Column 0: count: 242 hasNull: false
+  Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770
+  Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646
+
+Stripes:
+  Stripe: offset: 3 data: 616 rows: 90 tail: 61 index: 76
+    Stream: column 0 section ROW_INDEX start: 3 length 11
+    Stream: column 1 section ROW_INDEX start: 14 length 27
+    Stream: column 2 section ROW_INDEX start: 41 length 38
+    Stream: column 1 section DATA start: 79 length 185
+    Stream: column 2 section DATA start: 264 length 377
+    Stream: column 2 section LENGTH start: 641 length 54
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+  Stripe: offset: 756 data: 544 rows: 78 tail: 61 index: 76
+    Stream: column 0 section ROW_INDEX start: 756 length 11
+    Stream: column 1 section ROW_INDEX start: 767 length 27
+    Stream: column 2 section ROW_INDEX start: 794 length 38
+    Stream: column 1 section DATA start: 832 length 161
+    Stream: column 2 section DATA start: 993 length 332
+    Stream: column 2 section LENGTH start: 1325 length 51
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 78 hasNull: false min: 0 max: 497 sum: 18371 positions: 0,0,0
+  Stripe: offset: 1437 data: 519 rows: 74 tail: 61 index: 78
+    Stream: column 0 section ROW_INDEX start: 1437 length 11
+    Stream: column 1 section ROW_INDEX start: 1448 length 27
+    Stream: column 2 section ROW_INDEX start: 1475 length 40
+    Stream: column 1 section DATA start: 1515 length 153
+    Stream: column 2 section DATA start: 1668 length 331
+    Stream: column 2 section LENGTH start: 1999 length 35
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 74 hasNull: false min: 2 max: 493 sum: 19663 positions: 0,0,0
+
+File length: 2393 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+86	val_86	1	0
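
[Note: the dump above is the point of the test: after CONCATENATE the partition is a single file with three stripes, and "Compression size: 4096" matches the orc.compress.size from the CREATE TABLE at the top of this q.out, rather than a default buffer size pulled from config (the bug this commit fixes). One way to confirm the table-level setting being checked, using standard Hive syntax:

    -- shows orc.compress=SNAPPY and orc.compress.size=4096 from the DDL above
    SHOW TBLPROPERTIES orcfile_merge1;
]
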
+PREHOOK: query: select * from orcfile_merge1c where ds='1' and part='0' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+#### A masked pattern was here ####
+-- BEGIN ORC FILE DUMP --
+#### A masked pattern was here ####
+File Version: 0.12 with HIVE_4243
+Rows: 242
+Compression: SNAPPY
+Compression size: 4096
+Type: struct<key:int,value:string>
+
+Stripe Statistics:
+  Stripe 1:
+    Column 0: count: 90 hasNull: false
+    Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736
+    Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612
+  Stripe 2:
+    Column 0: count: 78 hasNull: false
+    Column 1: count: 78 hasNull: false min: 0 max: 497 sum: 18371
+    Column 2: count: 78 hasNull: false min: val_0 max: val_95 sum: 529
+  Stripe 3:
+    Column 0: count: 74 hasNull: false
+    Column 1: count: 74 hasNull: false min: 2 max: 493 sum: 19663
+    Column 2: count: 74 hasNull: false min: val_105 max: val_97 sum: 505
+
+File Statistics:
+  Column 0: count: 242 hasNull: false
+  Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770
+  Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646
+
+Stripes:
+  Stripe: offset: 3 data: 616 rows: 90 tail: 61 index: 76
+    Stream: column 0 section ROW_INDEX start: 3 length 11
+    Stream: column 1 section ROW_INDEX start: 14 length 27
+    Stream: column 2 section ROW_INDEX start: 41 length 38
+    Stream: column 1 section DATA start: 79 length 185
+    Stream: column 2 section DATA start: 264 length 377
+    Stream: column 2 section LENGTH start: 641 length 54
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+  Stripe: offset: 756 data: 544 rows: 78 tail: 61 index: 76
+    Stream: column 0 section ROW_INDEX start: 756 length 11
+    Stream: column 1 section ROW_INDEX start: 767 length 27
+    Stream: column 2 section ROW_INDEX start: 794 length 38
+    Stream: column 1 section DATA start: 832 length 161
+    Stream: column 2 section DATA start: 993 length 332
+    Stream: column 2 section LENGTH start: 1325 length 51
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 78 hasNull: false min: 0 max: 497 sum: 18371 positions: 0,0,0
+  Stripe: offset: 1437 data: 519 rows: 74 tail: 61 index: 78
+    Stream: column 0 section ROW_INDEX start: 1437 length 11
+    Stream: column 1 section ROW_INDEX start: 1448 length 27
+    Stream: column 2 section ROW_INDEX start: 1475 length 40
+    Stream: column 1 section DATA start: 1515 length 153
+    Stream: column 2 section DATA start: 1668 length 331
+    Stream: column 2 section LENGTH start: 1999 length 35
+    Encoding column 0: DIRECT
+    Encoding column 1: DIRECT_V2
+    Encoding column 2: DIRECT_V2
+    Row group indices for column 1:
+      Entry 0: count: 74 hasNull: false min: 2 max: 493 sum: 19663 positions: 0,0,0
+
+File length: 2393 bytes
+Padding length: 0 bytes
+Padding ratio: 0%
+-- END ORC FILE DUMP --
+86	val_86	1	0
+PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Output: default@orcfile_merge1c