You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ha...@apache.org on 2014/03/27 00:57:54 UTC
svn commit: r1582132 - in /hive/branches/branch-0.13:
common/src/java/org/apache/hadoop/hive/conf/
ql/src/java/org/apache/hadoop/hive/ql/
ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/
ql/src/java/org/apache/hadoop/hive/ql/parse/ ql/src/java...
Author: hashutosh
Date: Wed Mar 26 23:57:53 2014
New Revision: 1582132
URL: http://svn.apache.org/r1582132
Log:
HIVE-6492 : limit partition number involved in a table scan (Selina Zhang via Ashutosh Chauhan, Gunther Hagleitner)
Added:
hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition.q
hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition_stats.q
hive/branches/branch-0.13/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition.q.out
hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition_stats.q.out
hive/branches/branch-0.13/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
Modified:
hive/branches/branch-0.13/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java
hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
Modified: hive/branches/branch-0.13/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1582132&r1=1582131&r2=1582132&view=diff
==============================================================================
--- hive/branches/branch-0.13/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/branch-0.13/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Wed Mar 26 23:57:53 2014
@@ -556,6 +556,7 @@ public class HiveConf extends Configurat
HIVELIMITOPTENABLE("hive.limit.optimize.enable", false),
HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000),
HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f),
+ HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1),
HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000),
HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75),
Modified: hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1582132&r1=1582131&r2=1582132&view=diff
==============================================================================
--- hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Wed Mar 26 23:57:53 2014
@@ -416,6 +416,8 @@ public enum ErrorMsg {
DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR(20004, "Fatal error occurred when node " +
"tried to create too many dynamic partitions. The maximum number of dynamic partitions " +
"is controlled by hive.exec.max.dynamic.partitions and hive.exec.max.dynamic.partitions.pernode. "),
+ PARTITION_SCAN_LIMIT_EXCEEDED(20005, "Number of partitions scanned (={0}) on table {1} exceeds limit" +
+ " (={2}). This is controlled by hive.limit.query.max.table.partition.", true),
//========================== 30000 range starts here ========================//
STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
Modified: hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java?rev=1582132&r1=1582131&r2=1582132&view=diff
==============================================================================
--- hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java (original)
+++ hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java Wed Mar 26 23:57:53 2014
@@ -305,6 +305,7 @@ public class MetadataOnlyOptimizer imple
while (iterator.hasNext()) {
TableScanOperator tso = iterator.next();
+ ((TableScanDesc)tso.getConf()).setIsMetadataOnly(true);
MapWork work = ((MapredWork) task.getWork()).getMapWork();
String alias = getAliasForTableScanOperator(work, tso);
LOG.info("Metadata only table scan for " + alias);
Modified: hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1582132&r1=1582131&r2=1582132&view=diff
==============================================================================
--- hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Wed Mar 26 23:57:53 2014
@@ -62,6 +62,7 @@ import org.apache.hadoop.hive.ql.QueryPr
import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.FetchTask;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.exec.FunctionInfo;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -9268,6 +9269,8 @@ public class SemanticAnalyzer extends Ba
optm.setPctx(pCtx);
optm.initialize(conf);
pCtx = optm.optimize();
+
+ FetchTask origFetchTask = pCtx.getFetchTask();
if (LOG.isDebugEnabled()) {
LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
@@ -9291,9 +9294,49 @@ public class SemanticAnalyzer extends Ba
LOG.info("Completed plan generation");
+ if (!ctx.getExplain()) {
+ // if desired check we're not going over partition scan limits
+ enforceScanLimits(pCtx, origFetchTask);
+ }
+
return;
}
+ /**
+  * Enforces the partition-scan cap configured via
+  * hive.limit.query.max.table.partition (ConfVars.HIVELIMITTABLESCANPARTITION,
+  * default -1 = unlimited).
+  *
+  * @param pCtx parse context holding the pruned partition list per table scan
+  * @param fTask fetch task captured before physical optimization; non-null
+  *              means the query is served by a fetch, not a cluster job
+  * @throws SemanticException PARTITION_SCAN_LIMIT_EXCEEDED when a scan covers
+  *                           more partitions than the configured limit
+  */
+ private void enforceScanLimits(ParseContext pCtx, FetchTask fTask)
+ throws SemanticException {
+ int scanLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION);
+
+ if (scanLimit > -1) {
+ // a scan limit on the number of partitions has been set by the user
+ if (fTask != null) {
+ // having a fetch task at this point means that we're not going to
+ // launch a job on the cluster
+ // Only reject a partitioned fetch with no row limit whose partition
+ // directory count exceeds the cap; a LIMIT query (getLimit() != -1) is allowed.
+ if (!fTask.getWork().isNotPartitioned() && fTask.getWork().getLimit() == -1
+ && scanLimit < fTask.getWork().getPartDir().size()) {
+ throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
+ + fTask.getWork().getPartDir().size(), ""
+ + fTask.getWork().getTblDesc().getTableName(), "" + scanLimit)
+ }
+ } else {
+ // At this point we've run the partition pruner for all top ops. Let's
+ // check whether any of them break the limit
+ for (Operator<?> topOp : topOps.values()) {
+ if (topOp instanceof TableScanOperator) {
+ // Metadata-only scans (flagged by MetadataOnlyOptimizer) read no
+ // partition data, so they are exempt from the cap.
+ if (((TableScanDesc)topOp.getConf()).getIsMetadataOnly()) {
+ continue;
+ }
+ PrunedPartitionList parts = pCtx.getOpToPartList().get((TableScanOperator) topOp);
+ // NOTE(review): assumes every partitioned table scan has an entry in
+ // opToPartList; a missing entry would NPE here -- confirm the pruner
+ // always populates it before this runs.
+ if (parts.getPartitions().size() > scanLimit) {
+ throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
+ + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""
+ + scanLimit);
+ }
+ }
+ }
+ }
+ }
+ }
+
@Override
public List<FieldSchema> getResultSchema() {
return resultSchema;
Modified: hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java?rev=1582132&r1=1582131&r2=1582132&view=diff
==============================================================================
--- hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java (original)
+++ hive/branches/branch-0.13/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java Wed Mar 26 23:57:53 2014
@@ -70,6 +70,8 @@ public class TableScanDesc extends Abstr
// input file name (big) to bucket number
private Map<String, Integer> bucketFileNameMapping;
+
+ private boolean isMetadataOnly = false;
@SuppressWarnings("nls")
public TableScanDesc() {
@@ -192,4 +194,12 @@ public class TableScanDesc extends Abstr
public void setBucketFileNameMapping(Map<String, Integer> bucketFileNameMapping) {
this.bucketFileNameMapping = bucketFileNameMapping;
}
+
+ // Marks this scan as metadata-only (set by MetadataOnlyOptimizer when the
+ // query can be answered from partition metadata alone); such scans are
+ // exempt from the hive.limit.query.max.table.partition check.
+ public void setIsMetadataOnly(boolean metadata_only) {
+ isMetadataOnly = metadata_only;
+ }
+
+ // Whether this table scan was rewritten to be metadata-only.
+ public boolean getIsMetadataOnly() {
+ return isMetadataOnly;
+ }
}
Added: hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition.q?rev=1582132&view=auto
==============================================================================
--- hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition.q (added)
+++ hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition.q Wed Mar 26 23:57:53 2014
@@ -0,0 +1,7 @@
+set hive.limit.query.max.table.partition=1;
+
+explain select * from srcpart limit 1;
+select * from srcpart limit 1;
+
+explain select * from srcpart;
+select * from srcpart;
Added: hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition_stats.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition_stats.q?rev=1582132&view=auto
==============================================================================
--- hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition_stats.q (added)
+++ hive/branches/branch-0.13/ql/src/test/queries/clientnegative/limit_partition_stats.q Wed Mar 26 23:57:53 2014
@@ -0,0 +1,18 @@
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.stats.autogather=true;
+set hive.compute.query.using.stats=true;
+
+create table part (c int) partitioned by (d string);
+insert into table part partition (d)
+select hr,ds from srcpart;
+
+set hive.limit.query.max.table.partition=1;
+
+explain select count(*) from part;
+select count(*) from part;
+
+set hive.compute.query.using.stats=false;
+
+explain select count(*) from part;
+select count(*) from part;
Added: hive/branches/branch-0.13/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q?rev=1582132&view=auto
==============================================================================
--- hive/branches/branch-0.13/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q (added)
+++ hive/branches/branch-0.13/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q Wed Mar 26 23:57:53 2014
@@ -0,0 +1,7 @@
+set hive.limit.query.max.table.partition=1;
+
+explain select ds from srcpart where hr=11 and ds='2008-04-08';
+select ds from srcpart where hr=11 and ds='2008-04-08';
+
+explain select distinct hr from srcpart;
+select distinct hr from srcpart;
Added: hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition.q.out?rev=1582132&view=auto
==============================================================================
--- hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition.q.out (added)
+++ hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition.q.out Wed Mar 26 23:57:53 2014
@@ -0,0 +1,63 @@
+PREHOOK: query: explain select * from srcpart limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from srcpart limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 1
+ Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ ListSink
+
+PREHOOK: query: select * from srcpart limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+238 val_238 2008-04-08 11
+PREHOOK: query: explain select * from srcpart
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from srcpart
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE
+ ListSink
+
+FAILED: SemanticException Number of partitions scanned (=4) on table default.srcpart exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition.
Added: hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition_stats.q.out?rev=1582132&view=auto
==============================================================================
--- hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition_stats.q.out (added)
+++ hive/branches/branch-0.13/ql/src/test/results/clientnegative/limit_partition_stats.q.out Wed Mar 26 23:57:53 2014
@@ -0,0 +1,102 @@
+PREHOOK: query: create table part (c int) partitioned by (d string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table part (c int) partitioned by (d string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part
+PREHOOK: query: insert into table part partition (d)
+select hr,ds from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@part
+POSTHOOK: query: insert into table part partition (d)
+select hr,ds from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@part@d=2008-04-08
+POSTHOOK: Output: default@part@d=2008-04-09
+POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+PREHOOK: query: explain select count(*) from part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from part
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+
+PREHOOK: query: select count(*) from part
+PREHOOK: type: QUERY
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from part
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+2000
+PREHOOK: query: explain select count(*) from part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from part
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: _col0 (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+
+FAILED: SemanticException Number of partitions scanned (=2) on table part exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition.
Added: hive/branches/branch-0.13/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.13/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out?rev=1582132&view=auto
==============================================================================
--- hive/branches/branch-0.13/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out (added)
+++ hive/branches/branch-0.13/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out Wed Mar 26 23:57:53 2014
@@ -0,0 +1,610 @@
+PREHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+
+PREHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+2008-04-08
+PREHOOK: query: explain select distinct hr from srcpart
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select distinct hr from srcpart
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+ Select Operator
+ expressions: hr (type: string)
+ outputColumnNames: hr
+ Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+ Group By Operator
+ keys: hr (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+ Select Operator
+ expressions: _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+
+PREHOOK: query: select distinct hr from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct hr from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+11
+12