You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ha...@apache.org on 2013/08/02 06:55:14 UTC
svn commit: r1509543 - in /hive/trunk/ql/src:
java/org/apache/hadoop/hive/ql/optimizer/
java/org/apache/hadoop/hive/ql/parse/ test/queries/clientpositive/
test/results/clientpositive/
Author: hashutosh
Date: Fri Aug 2 04:55:13 2013
New Revision: 1509543
URL: http://svn.apache.org/r1509543
Log:
HIVE-4968 : When deduplicating multiple SelectOperators, we should update RowResolver accordingly (Yin Huai via Ashutosh Chauhan)
Modified:
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q
hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java?rev=1509543&r1=1509542&r2=1509543&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java Fri Aug 2 04:55:13 2013
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.ql.optimizer;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
@@ -49,8 +50,11 @@ import org.apache.hadoop.hive.ql.plan.Ex
*/
public class NonBlockingOpDeDupProc implements Transform {
+ private ParseContext pctx;
+
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
+ this.pctx = pctx;
String SEL = SelectOperator.getOperatorName();
String FIL = FilterOperator.getOperatorName();
Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
@@ -66,7 +70,7 @@ public class NonBlockingOpDeDupProc impl
return pctx;
}
- static class SelectDedup implements NodeProcessor {
+ private class SelectDedup implements NodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
@@ -86,19 +90,42 @@ public class NonBlockingOpDeDupProc impl
Set<String> funcOutputs = getFunctionOutputs(
pSEL.getConf().getOutputColumnNames(), pSEL.getConf().getColList());
- List<ExprNodeDesc> sources = cSEL.getConf().getColList();
- if (!funcOutputs.isEmpty() && !checkReferences(sources, funcOutputs)) {
+ List<ExprNodeDesc> cSELColList = cSEL.getConf().getColList();
+ List<String> cSELOutputColumnNames = cSEL.getConf().getOutputColumnNames();
+ if (!funcOutputs.isEmpty() && !checkReferences(cSELColList, funcOutputs)) {
return null;
}
- pSEL.getConf().setColList(ExprNodeDescUtils.backtrack(sources, cSEL, pSEL));
- pSEL.getConf().setOutputColumnNames(cSEL.getConf().getOutputColumnNames());
-
- // updates schema only (this should be the last optimizer modifying operator tree)
+ if (cSEL.getColumnExprMap() == null) {
+ // If the child SelectOperator does not have the ColumnExprMap,
+ // we do not need to update the ColumnExprMap in the parent SelectOperator.
+ pSEL.getConf().setColList(ExprNodeDescUtils.backtrack(cSELColList, cSEL, pSEL));
+ pSEL.getConf().setOutputColumnNames(cSELOutputColumnNames);
+ } else {
+ // If the child SelectOperator has the ColumnExprMap,
+ // we need to update the ColumnExprMap in the parent SelectOperator.
+ List<ExprNodeDesc> newPSELColList = new ArrayList<ExprNodeDesc>();
+ List<String> newPSELOutputColumnNames = new ArrayList<String>();
+ Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
+ for (int i= 0; i < cSELOutputColumnNames.size(); i++) {
+ String outputColumnName = cSELOutputColumnNames.get(i);
+ ExprNodeDesc cSELExprNodeDesc = cSELColList.get(i);
+ ExprNodeDesc newPSELExprNodeDesc =
+ ExprNodeDescUtils.backtrack(cSELExprNodeDesc, cSEL, pSEL);
+ newPSELColList.add(newPSELExprNodeDesc);
+ newPSELOutputColumnNames.add(outputColumnName);
+ colExprMap.put(outputColumnName, newPSELExprNodeDesc);
+ }
+ pSEL.getConf().setColList(newPSELColList);
+ pSEL.getConf().setOutputColumnNames(newPSELOutputColumnNames);
+ pSEL.setColumnExprMap(colExprMap);
+ }
pSEL.setSchema(cSEL.getSchema());
}
pSEL.getConf().setSelectStar(cSEL.getConf().isSelectStar());
-
+ // We need to use the OpParseContext of the child SelectOperator to replace
+ // the OpParseContext of the parent SelectOperator.
+ pctx.updateOpParseCtx(pSEL, pctx.removeOpParseCtx(cSEL));
pSEL.removeChildAndAdoptItsChildren(cSEL);
cSEL.setParentOperators(null);
cSEL.setChildOperators(null);
@@ -148,7 +175,7 @@ public class NonBlockingOpDeDupProc impl
}
}
- static class FilterDedup implements NodeProcessor {
+ private class FilterDedup implements NodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java?rev=1509543&r1=1509542&r2=1509543&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java Fri Aug 2 04:55:13 2013
@@ -392,6 +392,27 @@ public class ParseContext {
}
/**
+ * Remove the OpParseContext of a specific operator op
+ * @param op
+ * @return
+ */
+ public OpParseContext removeOpParseCtx(Operator<? extends OperatorDesc> op) {
+ return opParseCtx.remove(op);
+ }
+
+ /**
+ * Update the OpParseContext of operator op to newOpParseContext.
+ * If op is not in opParseCtx, a new entry will be added into opParseCtx.
+ * The key is op, and the value is newOpParseContext.
+ * @param op
+ * @param newOpParseContext
+ */
+ public void updateOpParseCtx(Operator<? extends OperatorDesc> op,
+ OpParseContext newOpParseContext) {
+ opParseCtx.put(op, newOpParseContext);
+ }
+
+ /**
* @param opParseCtx
* the opParseCtx to set
*/
Modified: hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q?rev=1509543&r1=1509542&r2=1509543&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q Fri Aug 2 04:55:13 2013
@@ -1,2 +1,45 @@
-- negative, references twice for result of function
explain select nkey, nkey + 1 from (select key + 1 as nkey, value from src) a;
+
+set hive.auto.convert.join=false;
+-- This test query is introduced for HIVE-4968.
+-- First, we do not convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4;
+
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4;
+
+set hive.auto.convert.join=true;
+-- Then, we convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4;
+
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4;
Modified: hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out?rev=1509543&r1=1509542&r2=1509543&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out Fri Aug 2 04:55:13 2013
@@ -42,3 +42,362 @@ STAGE PLANS:
limit: -1
+PREHOOK: query: -- This test query is introduced for HIVE-4968.
+-- First, we do not convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This test query is introduced for HIVE-4968.
+-- First, we do not convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+ (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tm
p4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count))))
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Alias -> Map Operator Tree:
+ tmp4:tmp3:src1
+ TableScan
+ alias: src1
+ Select Operator
+ Group By Operator
+ aggregations:
+ expr: count()
+ bucketGroup: false
+ mode: hash
+ outputColumnNames: _col0
+ Reduce Output Operator
+ sort order:
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: bigint
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations:
+ expr: count(VALUE._col0)
+ bucketGroup: false
+ mode: mergepartial
+ outputColumnNames: _col0
+ Select Operator
+ expressions:
+ expr: _col0
+ type: bigint
+ outputColumnNames: _col0
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-2
+ Map Reduce
+ Alias -> Map Operator Tree:
+ $INTNAME
+ Reduce Output Operator
+ sort order:
+ tag: 1
+ value expressions:
+ expr: _col0
+ type: bigint
+ tmp4:tmp2:tmp1:src1
+ TableScan
+ alias: src1
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ sort order:
+ tag: 0
+ value expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Inner Join 0 to 1
+ condition expressions:
+ 0 {VALUE._col0} {VALUE._col1}
+ 1 {VALUE._col0}
+ handleSkewJoin: false
+ outputColumnNames: _col0, _col1, _col2
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ expr: _col2
+ type: bigint
+ outputColumnNames: _col0, _col1, _col2
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+
+
+PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+238 val_238 25
+ 25
+311 val_311 25
+ val_27 25
+ val_165 25
+ val_409 25
+255 val_255 25
+278 val_278 25
+98 val_98 25
+ val_484 25
+ val_265 25
+ val_193 25
+401 val_401 25
+150 val_150 25
+273 val_273 25
+224 25
+369 25
+66 val_66 25
+128 25
+213 val_213 25
+146 val_146 25
+406 val_406 25
+ 25
+ 25
+ 25
+PREHOOK: query: -- Then, we convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Then, we convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+ (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tm
p4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count))))
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-5 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-5
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Alias -> Map Operator Tree:
+ tmp4:tmp3:src1
+ TableScan
+ alias: src1
+ Select Operator
+ Group By Operator
+ aggregations:
+ expr: count()
+ bucketGroup: false
+ mode: hash
+ outputColumnNames: _col0
+ Reduce Output Operator
+ sort order:
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: bigint
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations:
+ expr: count(VALUE._col0)
+ bucketGroup: false
+ mode: mergepartial
+ outputColumnNames: _col0
+ Select Operator
+ expressions:
+ expr: _col0
+ type: bigint
+ outputColumnNames: _col0
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-5
+ Map Reduce Local Work
+ Alias -> Map Local Tables:
+ tmp4:tmp2:tmp1:src1
+ Fetch Operator
+ limit: -1
+ Alias -> Map Local Operator Tree:
+ tmp4:tmp2:tmp1:src1
+ TableScan
+ alias: src1
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ HashTable Sink Operator
+ condition expressions:
+ 0 {_col0} {_col1}
+ 1 {_col0}
+ handleSkewJoin: false
+ keys:
+ 0 []
+ 1 []
+ Position of Big Table: 1
+
+ Stage: Stage-4
+ Map Reduce
+ Alias -> Map Operator Tree:
+ $INTNAME
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ condition expressions:
+ 0 {_col0} {_col1}
+ 1 {_col0}
+ handleSkewJoin: false
+ keys:
+ 0 []
+ 1 []
+ outputColumnNames: _col0, _col1, _col2
+ Position of Big Table: 1
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ expr: _col2
+ type: bigint
+ outputColumnNames: _col0, _col1, _col2
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ Local Work:
+ Map Reduce Local Work
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+
+
+PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+ FROM (SELECT *
+ FROM (SELECT key, value
+ FROM src1) tmp1 ) tmp2
+ JOIN (SELECT count(*) as count
+ FROM src1) tmp3
+ ) tmp4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+238 val_238 25
+ 25
+311 val_311 25
+ val_27 25
+ val_165 25
+ val_409 25
+255 val_255 25
+278 val_278 25
+98 val_98 25
+ val_484 25
+ val_265 25
+ val_193 25
+401 val_401 25
+150 val_150 25
+273 val_273 25
+224 25
+369 25
+66 val_66 25
+128 25
+213 val_213 25
+146 val_146 25
+406 val_406 25
+ 25
+ 25
+ 25