Posted to commits@hive.apache.org by na...@apache.org on 2012/09/27 06:00:07 UTC

svn commit: r1390833 - in /hive/trunk/ql/src/java/org/apache/hadoop/hive/ql: ErrorMsg.java exec/Utilities.java optimizer/ppr/PartitionPruner.java

Author: namit
Date: Thu Sep 27 04:00:07 2012
New Revision: 1390833

URL: http://svn.apache.org/viewvc?rev=1390833&view=rev
Log:
HIVE-3397 PartitionPruner should log why it is not pushing the filter
down to JDO (Navis via namit)
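
The change below turns Utilities.checkJDOPushDown from a boolean check into one that returns a reason: a null result means the pruning expression can be pushed down to JDO filtering, and any non-null String explains why it cannot, so PartitionPruner can log that reason before falling back to a sequential scan. A minimal stand-alone sketch of that contract (not Hive code; all names here are illustrative stand-ins):

// Sketch of the null-means-pushable contract adopted by this commit.
public class PushDownContractSketch {

  // Stand-in for the leaf case in Utilities.checkJDOPushDown: the JDO
  // filter only accepts String-typed literals.
  static String checkLiteral(Object value) {
    if (value instanceof String) {
      return null;                                  // pushable
    }
    return "Constant " + value + " is not string type";
  }

  public static void main(String[] args) {
    for (Object candidate : new Object[] {"2012-09-27", 42}) {
      String message = checkLiteral(candidate);
      if (message == null) {
        System.out.println(candidate + ": push down to JDO");
      } else {
        // PartitionPruner logs this reason and falls back to a
        // sequential scan of the partitions.
        System.out.println(candidate + ": " + message);
      }
    }
  }
}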


Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1390833&r1=1390832&r2=1390833&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Thu Sep 27 04:00:07 2012
@@ -250,6 +250,8 @@ public enum ErrorMsg {
   JOINNODE_OUTERJOIN_MORETHAN_8(10142, "Single join node containing outer join(s) " +
       "cannot have more than 8 aliases"),
 
+  INVALID_JDO_FILTER_EXPRESSION(10043, "Invalid expression for JDO filter"),
+
   CREATE_SKEWED_TABLE_NO_COLUMN_NAME(10200, "No skewed column name."),
   CREATE_SKEWED_TABLE_NO_COLUMN_VALUE(10201, "No skewed values."),
   CREATE_SKEWED_TABLE_DUPLICATE_COLUMN_NAMES(10202,
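
The new ErrorMsg entry supplies the base text that PartitionPruner combines with the reason string (see the hunk further down). A self-contained mock of that usage, assuming getMsg(String) simply appends the extra detail to the base message:

// Illustrative mock, not the real org.apache.hadoop.hive.ql.ErrorMsg.
enum MockErrorMsg {
  INVALID_JDO_FILTER_EXPRESSION(10043, "Invalid expression for JDO filter");

  private final int errorCode;
  private final String mesg;

  MockErrorMsg(int errorCode, String mesg) {
    this.errorCode = errorCode;
    this.mesg = mesg;
  }

  String getMsg(String reason) {
    return mesg + " " + reason;                     // assumed formatting
  }

  public static void main(String[] args) {
    // Roughly the line the pruner logs when pushdown is rejected:
    // Invalid expression for JDO filter by condition 'Constant 42 is not string type'
    System.out.println(INVALID_JDO_FILTER_EXPRESSION.getMsg(
        "by condition 'Constant 42 is not string type'"));
  }
}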

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1390833&r1=1390832&r2=1390833&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Thu Sep 27 04:00:07 2012
@@ -2060,13 +2060,16 @@ public final class Utilities {
    *     restriction by the current JDO filtering implementation.
    * @param tab The table that contains the partition columns.
    * @param expr the partition pruning expression
-   * @return true if the partition pruning expression can be pushed down to JDO filtering.
+   * @return null if the partition pruning expression can be pushed down to JDO filtering.
    */
-  public static boolean checkJDOPushDown(Table tab, ExprNodeDesc expr) {
+  public static String checkJDOPushDown(Table tab, ExprNodeDesc expr) {
     if (expr instanceof ExprNodeConstantDesc) {
       // JDO filter now only support String typed literal -- see Filter.g and ExpressionTree.java
       Object value = ((ExprNodeConstantDesc)expr).getValue();
-      return (value instanceof String);
+      if (value instanceof String) {
+        return null;
+      }
+      return "Constant " + value + " is not string type";
     } else if (expr instanceof ExprNodeColumnDesc) {
       // JDO filter now only support String typed literal -- see Filter.g and ExpressionTree.java
       TypeInfo type = expr.getTypeInfo();
@@ -2074,28 +2077,32 @@ public final class Utilities {
         String colName = ((ExprNodeColumnDesc)expr).getColumn();
         for (FieldSchema fs: tab.getPartCols()) {
           if (fs.getName().equals(colName)) {
-            return fs.getType().equals(Constants.STRING_TYPE_NAME);
+            if (fs.getType().equals(Constants.STRING_TYPE_NAME)) {
+              return null;
+            }
+            return "Partition column " + fs.getName() + " is not string type";
           }
         }
         assert(false); // cannot find the partition column!
      } else {
-       return false;
+        return "Column " + expr.getExprString() + " is not string type";
      }
     } else if (expr instanceof ExprNodeGenericFuncDesc) {
       ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc) expr;
       GenericUDF func = funcDesc.getGenericUDF();
       if (!supportedJDOFuncs(func)) {
-        return false;
+        return "Expression " + expr.getExprString() + " cannot be evaluated";
       }
       List<ExprNodeDesc> children = funcDesc.getChildExprs();
       for (ExprNodeDesc child: children) {
-        if (!checkJDOPushDown(tab, child)) {
-          return false;
+        String message = checkJDOPushDown(tab, child);
+        if (message != null) {
+          return message;
         }
       }
-      return true;
+      return null;
     }
-    return false;
+    return "Expression " + expr.getExprString() + " cannot be evaluated";
   }
 
   /**
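
For function nodes, the rewritten method recurses into the children and returns the message of the first child that cannot be pushed down, so the reported reason always names a concrete offending literal or column. A toy sketch of that propagation (not Hive code), with ExprNode standing in for ExprNodeDesc:

import java.util.Arrays;
import java.util.List;

public class RecursiveCheckSketch {

  // Toy expression node: either a literal leaf or a function node.
  static class ExprNode {
    Object literal;                       // non-null only for leaves
    List<ExprNode> children = Arrays.asList();

    static ExprNode literal(Object value) {
      ExprNode n = new ExprNode();
      n.literal = value;
      return n;
    }

    static ExprNode func(ExprNode... kids) {
      ExprNode n = new ExprNode();
      n.children = Arrays.asList(kids);
      return n;
    }
  }

  // Mirrors the shape of the rewritten checkJDOPushDown: null means
  // pushable, otherwise the reason from the first failing subexpression.
  static String check(ExprNode expr) {
    if (expr.literal != null) {
      return (expr.literal instanceof String)
          ? null : "Constant " + expr.literal + " is not string type";
    }
    for (ExprNode child : expr.children) {
      String message = check(child);
      if (message != null) {
        return message;                   // first failure bubbles up
      }
    }
    return null;
  }

  public static void main(String[] args) {
    // ds = '2012-09-27' AND hr = 12 -> rejected because of the literal 12
    ExprNode expr = ExprNode.func(ExprNode.literal("2012-09-27"),
                                  ExprNode.literal(12));
    System.out.println(check(expr));      // Constant 12 is not string type
  }
}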

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=1390833&r1=1390832&r2=1390833&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Thu Sep 27 04:00:07 2012
@@ -226,19 +226,25 @@ public class PartitionPruner implements 
             // This could happen when hive.mapred.mode=nonstrict and all the predicates
             // are on non-partition columns.
             unkn_parts.addAll(Hive.get().getPartitions(tab));
-          } else if (Utilities.checkJDOPushDown(tab, compactExpr)) {
-            String filter = compactExpr.getExprString();
-            String oldFilter = prunerExpr.getExprString();
-
-            if (filter.equals(oldFilter)) {
-              // pruneExpr contains only partition columns
-              pruneByPushDown(tab, true_parts, filter);
+          } else {
+            String message = Utilities.checkJDOPushDown(tab, compactExpr);
+            if (message == null) {
+              String filter = compactExpr.getExprString();
+              String oldFilter = prunerExpr.getExprString();
+
+              if (filter.equals(oldFilter)) {
+                // pruneExpr contains only partition columns
+                pruneByPushDown(tab, true_parts, filter);
+              } else {
+                // pruneExpr contains non-partition columns
+                pruneByPushDown(tab, unkn_parts, filter);
+              }
             } else {
-              // pruneExpr contains non-partition columns
-              pruneByPushDown(tab, unkn_parts, filter);
+              LOG.info(ErrorMsg.INVALID_JDO_FILTER_EXPRESSION.getMsg("by condition '"
+                  + message + "'"));
+              pruneBySequentialScan(tab, true_parts, unkn_parts, denied_parts,
+                  prunerExpr, rowObjectInspector);
             }
-          } else {
-            pruneBySequentialScan(tab, true_parts, unkn_parts, denied_parts, prunerExpr, rowObjectInspector);
           }
         }
         LOG.debug("tabname = " + tab.getTableName() + " is partitioned");
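
Taken together, the pruner now branches three ways: an exact push-down when the compacted filter equals the original pruning expression, a candidate-only push-down when non-partition predicates were compacted away, and a logged fall-back to a sequential scan when checkJDOPushDown returns a reason. A rough sketch of that flow (illustrative names and output; the real log line is built from ErrorMsg.INVALID_JDO_FILTER_EXPRESSION):

public class PrunerDecisionSketch {

  // pushDownMessage is what checkJDOPushDown would return; null means
  // the filter is acceptable to the JDO layer.
  static void prune(String compactFilter, String originalFilter,
      String pushDownMessage) {
    if (pushDownMessage == null) {
      if (compactFilter.equals(originalFilter)) {
        // Filter uses only partition columns, so JDO results are exact.
        System.out.println("push down '" + compactFilter + "' -> true_parts");
      } else {
        // Non-partition predicates were compacted away, so JDO results
        // are only candidates and still need evaluation.
        System.out.println("push down '" + compactFilter + "' -> unkn_parts");
      }
    } else {
      // New in this commit: log why pushdown was rejected, then evaluate
      // the original expression partition by partition.
      System.out.println("Invalid expression for JDO filter by condition '"
          + pushDownMessage + "'");
      System.out.println("sequential scan of partitions");
    }
  }

  public static void main(String[] args) {
    prune("(ds = '2012-09-27')", "(ds = '2012-09-27')", null);
    prune("(ds = '2012-09-27')", "((ds = '2012-09-27') and (key > 10))", null);
    prune("(hr = 12)", "(hr = 12)", "Partition column hr is not string type");
  }
}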