Posted to commits@hive.apache.org by px...@apache.org on 2015/12/19 08:45:15 UTC

hive git commit: HIVE-11775: Implement limit push down through union all in CBO (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)

Repository: hive
Updated Branches:
  refs/heads/master f1ecce036 -> 71536a2f8


HIVE-11775: Implement limit push down through union all in CBO (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/71536a2f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/71536a2f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/71536a2f

Branch: refs/heads/master
Commit: 71536a2f8295f61602e776e4e5773c7007a46b69
Parents: f1ecce0
Author: Pengcheng Xiong <px...@apache.org>
Authored: Fri Dec 18 23:44:40 2015 -0800
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Fri Dec 18 23:44:40 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   20 +-
 .../calcite/rules/HiveSortUnionReduceRule.java  |  109 ++
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   10 +-
 .../clientpositive/cbo_SortUnionTransposeRule.q |  100 ++
 .../clientpositive/limit_join_transpose.q       |   16 +-
 .../clientpositive/tez_dynpart_hashjoin_3.q     |    6 +-
 .../cbo_SortUnionTransposeRule.q.out            | 1196 ++++++++++++++++++
 7 files changed, 1432 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 60ac0c0..9e8e2f5 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1312,16 +1312,16 @@ public class HiveConf extends Configuration {
         "we are increasing the number of files possibly by a big margin. So, we merge aggressively."),
     HIVEOPTCORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."),
 
-    HIVE_OPTIMIZE_LIMIT_JOIN_TRANSPOSE("hive.optimize.limitjointranspose", false,
-        "Whether to push a limit through left/right outer join. If the value is true and the size of the outer\n" +
-        "input is reduced enough (as specified in hive.optimize.limitjointranspose.reduction), the limit is pushed\n" +
-        "to the outer input; to remain semantically correct, the limit is kept on top of the join too."),
-    HIVE_OPTIMIZE_LIMIT_JOIN_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limitjointranspose.reductionpercentage", 1.0f,
-        "When hive.optimize.limitjointranspose is true, this variable specifies the minimal reduction of the\n" +
-        "size of the outer input of the join that we should get in order to apply the rule."),
-    HIVE_OPTIMIZE_LIMIT_JOIN_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limitjointranspose.reductiontuples", (long) 0,
-        "When hive.optimize.limitjointranspose is true, this variable specifies the minimal reduction in the\n" +
-        "number of tuples of the outer input of the join that you should get in order to apply the rule."),
+    HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false,
+        "Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer\n" +
+        "input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\n" +
+        "to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too."),
+    HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limittranspose.reductionpercentage", 1.0f,
+        "When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\n" +
+        "size of the outer input of the join or input of the union that we should get in order to apply the rule."),
+    HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limittranspose.reductiontuples", (long) 0,
+        "When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\n" +
+        "number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule."),
 
     HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false,
         "Whether to create a separate plan for skewed keys for the tables in the join.\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java
new file mode 100644
index 0000000..0ec8bf1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortUnionReduceRule.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rel.core.Union;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+
+/**
+ * Planner rule that pushes a
+ * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit}
+ * past a
+ * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion}.
+ */
+public class HiveSortUnionReduceRule extends RelOptRule {
+
+  /**
+   * Rule instance for a Union implementation that does not preserve the ordering
+   * of its inputs. Hence it only makes sense to match this rule when the Sort
+   * has a limit, i.e., {@link Sort#fetch} is not null.
+   */
+  public static final HiveSortUnionReduceRule INSTANCE = new HiveSortUnionReduceRule();
+
+  // ~ Constructors -----------------------------------------------------------
+
+  private HiveSortUnionReduceRule() {
+    super(
+        operand(
+            HiveSortLimit.class,
+            operand(HiveUnion.class, any())));
+  }
+
+  // ~ Methods ----------------------------------------------------------------
+
+  @Override
+  public boolean matches(RelOptRuleCall call) {
+    final HiveSortLimit sort = call.rel(0);
+    final HiveUnion union = call.rel(1);
+
+    // We only apply this rule if Union.all is true and Sort.fetch is not null
+    // and greater than 0.
+    return union.all && sort.fetch != null
+        // Guard against Calcite bug CALCITE-987
+        && RexLiteral.intValue(sort.fetch) > 0;
+  }
+
+  public void onMatch(RelOptRuleCall call) {
+    final HiveSortLimit sort = call.rel(0);
+    final HiveUnion union = call.rel(1);
+    List<RelNode> inputs = new ArrayList<>();
+    // 'finishPushSortPastUnion' stays true only if the sort cannot be pushed into
+    // any input of the union, in which case there is nothing to transform.
+    boolean finishPushSortPastUnion = true;
+    final int offset = sort.offset == null ? 0 : RexLiteral.intValue(sort.offset);
+    for (RelNode input : union.getInputs()) {
+      // Push the sort into this input only if it actually reduces the input size.
+      if (RexLiteral.intValue(sort.fetch) + offset < RelMetadataQuery.getRowCount(input)) {
+        finishPushSortPastUnion = false;
+        // Rewrite: the new fetch (fetchRN) is the sum of the original offset and
+        // fetch, since each input must retain at least offset + fetch rows.
+        // Push it through by creating a new branchSort with the new fetchRN
+        // but no offset.
+        RexNode fetchRN = sort.getCluster().getRexBuilder()
+            .makeExactLiteral(BigDecimal.valueOf(RexLiteral.intValue(sort.fetch) + offset));
+        HiveSortLimit branchSort = sort.copy(sort.getTraitSet(), input, sort.getCollation(), null,
+            fetchRN);
+        branchSort.setRuleCreated(true);
+        inputs.add(branchSort);
+      } else {
+        inputs.add(input);
+      }
+    }
+    // there is nothing to change
+    if (finishPushSortPastUnion) {
+      return;
+    }
+    // create new union and sort
+    HiveUnion unionCopy = (HiveUnion) union.copy(union.getTraitSet(), inputs, union.all);
+    HiveSortLimit result = sort.copy(sort.getTraitSet(), unionCopy, sort.getCollation(), sort.offset,
+        sort.fetch);
+    call.transformTo(result);
+  }
+}
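
To make the rewrite in onMatch() concrete: for every union input whose estimated row count exceeds fetch + offset, a copy of the limit is pushed into that branch, since any offset/fetch window over a UNION ALL can be computed from the first offset + fetch rows of each input; the original limit is kept on top of the union to stay semantically correct. A rough sketch of the equivalence in plain SQL (hypothetical tables t1 and t2; the offset is mentioned only to show how it is folded into the pushed fetch):

-- original query
select key from t1
union all
select key from t2
limit 5;

-- conceptually, after HiveSortUnionReduceRule: each branch keeps at most
-- fetch + offset rows (here 5 + 0 = 5), and the limit stays on the union
select * from (select key from t1 limit 5) a
union all
select * from (select key from t2 limit 5) b
limit 5;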

http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 7c5a43f..f2da304 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -159,6 +159,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortJoinReduceRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortProjectTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortRemoveRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortUnionReduceRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverter;
@@ -1086,16 +1087,17 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // NOTE: We run this after PPD to support old style join syntax.
       // Ex: select * from R1 left outer join R2 where ((R1.x=R2.x) and R1.y<10) or
       // ((R1.x=R2.x) and R1.z=10)) and rand(1) < 0.1 order by R1.x limit 10
-      if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT_JOIN_TRANSPOSE)) {
+      if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT_TRANSPOSE)) {
         perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
         // This should be a cost based decision, but till we enable the extended cost
         // model, we will use the given value for the variable
         final float reductionProportion = HiveConf.getFloatVar(conf,
-            HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT_JOIN_TRANSPOSE_REDUCTION_PERCENTAGE);
+            HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE);
         final long reductionTuples = HiveConf.getLongVar(conf,
-            HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT_JOIN_TRANSPOSE_REDUCTION_TUPLES);
+            HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES);
         basePlan = hepPlan(basePlan, true, mdProvider, HiveSortMergeRule.INSTANCE,
-            HiveSortProjectTransposeRule.INSTANCE, HiveSortJoinReduceRule.INSTANCE);
+            HiveSortProjectTransposeRule.INSTANCE, HiveSortJoinReduceRule.INSTANCE,
+            HiveSortUnionReduceRule.INSTANCE);
         basePlan = hepPlan(basePlan, true, mdProvider, HepMatchOrder.BOTTOM_UP,
             new HiveSortRemoveRule(reductionProportion, reductionTuples),
             HiveProjectSortTransposeRule.INSTANCE);

http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q b/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q
new file mode 100644
index 0000000..9c8c4be
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q
@@ -0,0 +1,100 @@
+set hive.mapred.mode=nonstrict;
+
+create table s as select * from src limit 10;
+
+explain
+select key from s a
+union all
+select key from s b
+order by key;
+
+explain
+select key from s a
+union all
+select key from s b
+limit 0;
+
+explain
+select key from s a
+union all
+select key from s b
+limit 5;
+
+explain
+select key from s a
+union all
+select key from s b
+order by key
+limit 5;
+
+explain
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq1
+union all 
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq2
+limit 5;
+
+set hive.optimize.limittranspose=true;
+
+explain
+select key from s a
+union all
+select key from s b
+order by key;
+
+explain
+select key from s a
+union all
+select key from s b
+limit 0;
+
+explain
+select key from s a
+union all
+select key from s b
+limit 5;
+
+explain
+select key from s a
+union all
+select key from s b
+order by key
+limit 5;
+
+explain
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq1
+union all 
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq2
+limit 5;
+
+set hive.optimize.limittranspose.reductionpercentage=0.1f;
+
+explain
+select key from s a
+union all
+select key from s b
+limit 5;
+
+set hive.optimize.limittranspose.reductionpercentage=1f;
+set hive.optimize.limittranspose.reductiontuples=8;
+
+explain
+select key from s a
+union all
+select key from s b
+limit 5;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/ql/src/test/queries/clientpositive/limit_join_transpose.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/limit_join_transpose.q b/ql/src/test/queries/clientpositive/limit_join_transpose.q
index 80430c6..da34806 100644
--- a/ql/src/test/queries/clientpositive/limit_join_transpose.q
+++ b/ql/src/test/queries/clientpositive/limit_join_transpose.q
@@ -1,5 +1,5 @@
 set hive.mapred.mode=nonstrict;
-set hive.optimize.limitjointranspose=false;
+set hive.optimize.limittranspose=false;
 
 explain
 select *
@@ -13,9 +13,9 @@ on src1.key = src2.key
 limit 1;
 
 
-set hive.optimize.limitjointranspose=true;
-set hive.optimize.limitjointranspose.reductionpercentage=0.0001f;
-set hive.optimize.limitjointranspose.reductiontuples=10;
+set hive.optimize.limittranspose=true;
+set hive.optimize.limittranspose.reductionpercentage=0.0001f;
+set hive.optimize.limittranspose.reductiontuples=10;
 
 explain
 select *
@@ -29,8 +29,8 @@ on src1.key = src2.key
 limit 1;
 
 
-set hive.optimize.limitjointranspose.reductionpercentage=0.1f;
-set hive.optimize.limitjointranspose.reductiontuples=10;
+set hive.optimize.limittranspose.reductionpercentage=0.1f;
+set hive.optimize.limittranspose.reductiontuples=10;
 
 explain
 select *
@@ -60,8 +60,8 @@ from src src1 right outer join (
 on src1.key = src2.key
 limit 1;
 
-set hive.optimize.limitjointranspose.reductionpercentage=1f;
-set hive.optimize.limitjointranspose.reductiontuples=0;
+set hive.optimize.limittranspose.reductionpercentage=1f;
+set hive.optimize.limittranspose.reductiontuples=0;
 
 explain
 select *

http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_3.q b/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_3.q
index cecbbd7..ff4cde2 100644
--- a/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_3.q
+++ b/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_3.q
@@ -1,6 +1,6 @@
-set hive.optimize.limitjointranspose=true;
-set hive.optimize.limitjointranspose.reductionpercentage=0.1f;
-set hive.optimize.limitjointranspose.reductiontuples=100;
+set hive.optimize.limittranspose=true;
+set hive.optimize.limittranspose.reductionpercentage=0.1f;
+set hive.optimize.limittranspose.reductiontuples=100;
 set hive.explain.user=false;
 set hive.auto.convert.join=false;
 set hive.optimize.dynamic.partition.hashjoin=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/71536a2f/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out b/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
new file mode 100644
index 0000000..eef2389
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
@@ -0,0 +1,1196 @@
+PREHOOK: query: create table s as select * from src limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s as select * from src limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 0
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 0
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq1
+union all 
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq2
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq1
+union all 
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq2
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-2, Stage-6
+  Stage-5 is a root stage
+  Stage-6 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col1 (type: string)
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col1, _col2
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 10
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: string), _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Union
+              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          TableScan
+            Union
+              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col1 (type: string)
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col1, _col2
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 10
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: string), _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 0
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 0
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col0 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Union
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          TableScan
+            Union
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col0 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+order by key
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Union
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
+          TableScan
+            Union
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq1
+union all 
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq2
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq1
+union all 
+select * from(
+select src1.key, src2.value
+from src src1 left outer join src src2
+on src1.key = src2.key
+limit 10)subq2
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-1 depends on stages: Stage-4
+  Stage-2 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-2, Stage-6
+  Stage-7 is a root stage
+  Stage-5 depends on stages: Stage-7
+  Stage-6 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col0 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col1, _col2
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 5
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: string), _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Union
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          TableScan
+            Union
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-7
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 5
+                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col0 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col1, _col2
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 5
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: string), _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 5
+            Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from s a
+union all
+select key from s b
+limit 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          TableScan
+            alias: a
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+              Union
+                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
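For readers following the golden-output plans above, a minimal sketch of how this pushdown can be exercised from a Hive session or q test; hive.optimize.limittranspose is the property introduced by this change, while the table name s, the aliases, and the limit value are purely illustrative:

  -- illustrative only: enable limit pushdown through union all
  set hive.optimize.limittranspose=true;

  explain
  select key from s a
  union all
  select key from s b
  limit 5;

With the rule enabled, the limit is expected to be pushed into each branch feeding the union, while a limit is still kept on top of the union so that at most 5 rows are returned overall.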