You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ha...@apache.org on 2014/09/29 19:37:15 UTC
svn commit: r1628230 - in /hive/branches/branch-0.14/ql/src:
java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java
java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
test/results/clientpositive/tez/cbo_correctness.q.out
Author: hashutosh
Date: Mon Sep 29 17:37:14 2014
New Revision: 1628230
URL: http://svn.apache.org/r1628230
Log:
HIVE-8228 : CBO: fix a couple of issues with partition pruning (Harish Butani via Ashutosh Chauhan)
Modified:
hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java
hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out
Modified: hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java?rev=1628230&r1=1628229&r2=1628230&view=diff
==============================================================================
--- hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java (original)
+++ hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java Mon Sep 29 17:37:14 2014
@@ -24,7 +24,10 @@ import java.util.List;
import java.util.Set;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.eigenbase.relopt.RelOptCluster;
import org.eigenbase.reltype.RelDataType;
import org.eigenbase.reltype.RelDataTypeField;
@@ -104,6 +107,13 @@ public class PartitionPruner {
List<RexNode> args = new LinkedList<RexNode>();
boolean argsPruned = false;
+ GenericUDF hiveUDF = SqlFunctionConverter.getHiveUDF(call.getOperator(),
+ call.getType());
+ if (hiveUDF != null &&
+ !FunctionRegistry.isDeterministic(hiveUDF)) {
+ return null;
+ }
+
for (RexNode operand : call.operands) {
RexNode n = operand.accept(this);
if (n != null) {
Modified: hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1628230&r1=1628229&r2=1628230&view=diff
==============================================================================
--- hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/branch-0.14/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Sep 29 17:37:14 2014
@@ -12309,6 +12309,11 @@ public class SemanticAnalyzer extends Ba
HiveSortRel.HIVE_SORT_REL_FACTORY, HiveAggregateRel.HIVE_AGGR_REL_FACTORY, HiveUnionRel.UNION_REL_FACTORY);
basePlan = fieldTrimmer.trim(basePlan);
+ basePlan = hepPlan(basePlan, true, mdProvider,
+ new PushFilterPastProjectRule(FilterRelBase.class,
+ HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class,
+ HiveProjectRel.DEFAULT_PROJECT_FACTORY));
+
return basePlan;
}
Modified: hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out?rev=1628230&r1=1628229&r2=1628230&view=diff
==============================================================================
--- hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out (original)
+++ hive/branches/branch-0.14/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out Mon Sep 29 17:37:14 2014
@@ -16734,26 +16734,6 @@ POSTHOOK: Input: default@t1@dt=2014
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t2@dt=2014
#### A masked pattern was here ####
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
- 1 1 1 1.0 true 2014
- 1 1 1 1.0 true 2014
- 1 1 1 1.0 true 2014
- 1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 true 2014
-1 1 1 1.0 false 2014
-1 1 1 1.0 false 2014
-null null NULL NULL NULL 2014
-null null NULL NULL NULL 2014
1 1 1 1.0 true 2014
1 1 1 1.0 true 2014
1 1 1 1.0 true 2014
@@ -16774,6 +16754,26 @@ null null NULL NULL NULL 2014
2 2 2 2.0 true 2014
null null NULL NULL NULL 2014
null null NULL NULL NULL 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+null null NULL NULL NULL 2014
+null null NULL NULL NULL 2014
PREHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key
PREHOOK: type: QUERY
PREHOOK: Input: default@t1