Posted to commits@hive.apache.org by na...@apache.org on 2010/10/19 20:01:24 UTC

svn commit: r1024341 - in /hadoop/hive/trunk: ./ hbase-handler/src/test/results/ metastore/src/java/org/apache/hadoop/hive/metastore/ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ metastore/src/test/org/apache/hadoop/hive/metastore/ ql/sr...

Author: namit
Date: Tue Oct 19 18:01:23 2010
New Revision: 1024341

URL: http://svn.apache.org/viewvc?rev=1024341&view=rev
Log:
HIVE-1660. Change get_partitions_ps to pass partition filter to
database (Paul Yang via namit)
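
Summary: get_partitions_ps and get_partition_names_ps previously fetched every partition (or partition name) for the table and filtered the results in the metastore server. With this change, the partial partition specification is converted into a filter string (the new MetaStoreUtils.makeFilterStringFromMap) and pushed down to the database via RawStore.getPartitionsByFilter and the new RawStore.listPartitionNamesByFilter. In ObjectStore, the JDOQL construction is factored into makeQueryFilterString and makeParameterDeclarationString, and equality predicates are rewritten as regex matches against the stored partitionName; that in turn is why Warehouse.makePartName gains an addTrailingSeperator flag (the old trailing-separator behavior survives as makePartPath).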


Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/hbase-handler/src/test/results/hbase_pushdown.q.out
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
    hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Tue Oct 19 18:01:23 2010
@@ -196,6 +196,9 @@ Trunk -  Unreleased
     HIVE-1638. convert commonly used udfs to generic udfs
     (Siying Dong via namit)
 
+    HIVE-1660. Change get_partitions_ps to pass partition filter to
+    database (Paul Yang via namit)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/hive/trunk/hbase-handler/src/test/results/hbase_pushdown.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/hbase-handler/src/test/results/hbase_pushdown.q.out?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/hbase-handler/src/test/results/hbase_pushdown.q.out (original)
+++ hadoop/hive/trunk/hbase-handler/src/test/results/hbase_pushdown.q.out Tue Oct 19 18:01:23 2010
@@ -39,26 +39,27 @@ STAGE PLANS:
         hbase_pushdown 
           TableScan
             alias: hbase_pushdown
-            filterExpr:
-                expr: (key = 90)
-                type: boolean
             Filter Operator
               predicate:
                   expr: (key = 90)
                   type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: int
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              Filter Operator
+                predicate:
+                    expr: (key = 90)
+                    type: boolean
+                Select Operator
+                  expressions:
+                        expr: key
+                        type: int
+                        expr: value
+                        type: string
+                  outputColumnNames: _col0, _col1
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 
   Stage: Stage-0
     Fetch Operator
@@ -68,11 +69,11 @@ STAGE PLANS:
 PREHOOK: query: select * from hbase_pushdown where key=90
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_pushdown
-PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2010-09-09_17-05-54_049_1244315391309244934/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-19_10-45-49_710_5850592212021046409/-mr-10000
 POSTHOOK: query: select * from hbase_pushdown where key=90
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_pushdown
-POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2010-09-09_17-05-54_049_1244315391309244934/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-19_10-45-49_710_5850592212021046409/-mr-10000
 90	val_90
 PREHOOK: query: -- with partial pushdown
 
@@ -96,12 +97,9 @@ STAGE PLANS:
         hbase_pushdown 
           TableScan
             alias: hbase_pushdown
-            filterExpr:
-                expr: (key = 90)
-                type: boolean
             Filter Operator
               predicate:
-                  expr: (value like '%90%')
+                  expr: ((key = 90) and (value like '%90%'))
                   type: boolean
               Filter Operator
                 predicate:
@@ -129,11 +127,11 @@ STAGE PLANS:
 PREHOOK: query: select * from hbase_pushdown where key=90 and value like '%90%'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_pushdown
-PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2010-09-09_17-06-00_089_9169062048458581014/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-19_10-45-54_425_6743860916711765300/-mr-10000
 POSTHOOK: query: select * from hbase_pushdown where key=90 and value like '%90%'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_pushdown
-POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2010-09-09_17-06-00_089_9169062048458581014/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-19_10-45-54_425_6743860916711765300/-mr-10000
 90	val_90
 PREHOOK: query: -- with two residuals
 
@@ -159,12 +157,9 @@ STAGE PLANS:
         hbase_pushdown 
           TableScan
             alias: hbase_pushdown
-            filterExpr:
-                expr: (key = 90)
-                type: boolean
             Filter Operator
               predicate:
-                  expr: ((value like '%90%') and (key = UDFToInteger(value)))
+                  expr: (((key = 90) and (value like '%90%')) and (key = UDFToInteger(value)))
                   type: boolean
               Filter Operator
                 predicate:
@@ -244,12 +239,12 @@ PREHOOK: query: select * from hbase_push
 where key=80 and key=90 and value like '%90%'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_pushdown
-PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2010-09-09_17-06-05_982_8346366828445837832/-mr-10000
+PREHOOK: Output: file:/tmp/njain/hive_2010-10-19_10-46-00_265_1484640014270648892/-mr-10000
 POSTHOOK: query: select * from hbase_pushdown
 where key=80 and key=90 and value like '%90%'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_pushdown
-POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2010-09-09_17-06-05_982_8346366828445837832/-mr-10000
+POSTHOOK: Output: file:/tmp/njain/hive_2010-10-19_10-46-00_265_1484640014270648892/-mr-10000
 PREHOOK: query: -- with nothing to push down
 
 explain select * from hbase_pushdown

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Tue Oct 19 18:01:23 2010
@@ -1743,37 +1743,56 @@ public class HiveMetaStore extends Thrif
     }
 
     @Override
-    public List<Partition> get_partitions_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException,
-        TException {
+    public List<Partition> get_partitions_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partitions_ps");
       logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
-      List<Partition> parts = null;
-      List<Partition> matchingParts = new ArrayList<Partition>();
-
-      // This gets all the partitions and then filters based on the specified
-      // criteria. An alternative approach would be to get all the partition
-      // names, do the filtering on the names, and get the partition for each
-      // of the names. that match.
 
+      Table t;
       try {
-         parts = get_partitions(db_name, tbl_name, (short) -1);
+        t = get_table(db_name, tbl_name);
       } catch (NoSuchObjectException e) {
         throw new MetaException(e.getMessage());
       }
 
-      for (Partition p : parts) {
-        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
-          matchingParts.add(p);
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i=0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
+        i++;
+      }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
+
+      List<Partition> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
       }
 
-      return matchingParts;
+      return ret;
     }
 
     @Override
-    public List<String> get_partition_names_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException, TException {
+    public List<String> get_partition_names_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partition_names_ps");
       logStartPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
       Table t;
@@ -1783,23 +1802,37 @@ public class HiveMetaStore extends Thrif
         throw new MetaException(e.getMessage());
       }
 
-     List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
-     List<String> filteredPartNames = new ArrayList<String>();
-
-      for(String name : partNames) {
-        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
-        List<String> vals = new ArrayList<String>();
-        // Since we are iterating through a LinkedHashMap, iteration should
-        // return the partition values in the correct order for comparison.
-        for (String val : spec.values()) {
-          vals.add(val);
-        }
-        if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
-          filteredPartNames.add(name);
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i=0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
+        i++;
+      }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
+
+      List<String> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<String>>() {
+          @Override
+          List<String> run(RawStore ms) throws Exception {
+            return ms.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
       }
 
-      return filteredPartNames;
+      return ret;
     }
 
     @Override
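
Note: the two handlers above duplicate the conversion from a partial value list to a filter string. A minimal sketch of that logic as a standalone helper, purely illustrative (this commit inlines it in each method; the helper name is hypothetical):

    // Hypothetical helper (not in this commit); context: HiveMetaStore's handler
    // class, using java.util.* and org.apache.hadoop.hive.metastore.api.* as above.
    private static String makePartialSpecFilter(Table t, List<String> partVals)
        throws MetaException {
      if (partVals.size() > t.getPartitionKeys().size()) {
        throw new MetaException("Incorrect number of partition values");
      }
      // LinkedHashMap preserves the partition-key order of the table definition.
      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
      for (int i = 0; i < partVals.size(); i++) {
        String value = partVals.get(i);
        if (value.length() > 0) { // an empty value means "match any" for that key
          partKeyToValues.put(t.getPartitionKeys().get(i).getName(), value);
        }
      }
      return MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
    }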

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Tue Oct 19 18:01:23 2010
@@ -767,4 +767,23 @@ public class MetaStoreUtils {
     return TableType.INDEX_TABLE.toString().equals(table.getTableType());
   }
 
+  /**
+   * Given a map of partition column names to values, this creates a filter
+   * string that can be used to call the *byFilter methods
+   * @param m
+   * @return
+   */
+  public static String makeFilterStringFromMap(Map<String, String> m) {
+    StringBuilder filter = new StringBuilder();
+    for (Entry<String, String> e : m.entrySet()) {
+      String col = e.getKey();
+      String val = e.getValue();
+      if (filter.length() == 0) {
+        filter.append(col + "=\"" + val + "\"");
+      } else {
+        filter.append(" and " + col + "=\"" + val + "\"");
+      }
+    }
+    return filter.toString();
+  }
 }
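
For example (illustrative values), a two-key map produces a conjunction of quoted equality terms in insertion order:

    Map<String, String> spec = new LinkedHashMap<String, String>();
    spec.put("ds", "2008-07-01");
    spec.put("hr", "14");
    String filter = MetaStoreUtils.makeFilterStringFromMap(spec);
    // filter: ds="2008-07-01" and hr="14"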

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Tue Oct 19 18:01:23 2010
@@ -968,6 +968,51 @@ public class ObjectStore implements RawS
     return parts;
   }
 
+  private String makeQueryFilterString(MTable mtable, String filter,
+      Map<String, String> params)
+      throws MetaException {
+    StringBuilder queryBuilder = new StringBuilder(
+        "table.tableName == t1 && table.database.name == t2");
+
+    if( filter != null && filter.length() > 0) {
+
+      Table table = convertToTable(mtable);
+
+      CharStream cs = new ANTLRNoCaseStringStream(filter);
+      FilterLexer lexer = new FilterLexer(cs);
+
+      CommonTokenStream tokens = new CommonTokenStream();
+      tokens.setTokenSource (lexer);
+
+      FilterParser parser = new FilterParser(tokens);
+
+      try {
+        parser.filter();
+      } catch(RecognitionException re) {
+        throw new MetaException("Error parsing partition filter : " + re);
+      }
+
+      String jdoFilter = parser.tree.generateJDOFilter(table, params);
+
+      if( jdoFilter.trim().length() > 0 ) {
+        queryBuilder.append(" && ( ");
+        queryBuilder.append(jdoFilter.trim());
+        queryBuilder.append(" )");
+      }
+    }
+
+    return queryBuilder.toString();
+  }
+
+  private String makeParameterDeclarationString(Map<String, String> params) {
+    //Create the parameter declaration string
+    StringBuilder paramDecl = new StringBuilder();
+    for(String key : params.keySet() ) {
+      paramDecl.append(", java.lang.String  " + key);
+    }
+    return paramDecl.toString();
+  }
+
   private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException{
     boolean success = false;
@@ -983,77 +1028,100 @@ public class ObjectStore implements RawS
         throw new NoSuchObjectException("Specified database/table does not exist : "
             + dbName + "." + tableName);
       }
-
-      StringBuilder queryBuilder = new StringBuilder(
-          "table.tableName == t1 && table.database.name == t2");
-
       Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      if( filter != null ) {
+      Query query = pm.newQuery(MPartition.class,
+          queryFilterString);
 
-        Table table = convertToTable(mtable);
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
 
-        CharStream cs = new ANTLRNoCaseStringStream(filter);
-        FilterLexer lexer = new FilterLexer(cs);
+      LOG.debug("Filter specified is " + filter + "," +
+             " JDOQL filter is " + queryFilterString);
 
-        CommonTokenStream tokens = new CommonTokenStream();
-        tokens.setTokenSource (lexer);
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
 
-        FilterParser parser = new FilterParser(tokens);
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
+      query.setOrdering("partitionName ascending");
 
-        try {
-          parser.filter();
-        } catch(RecognitionException re) {
-          throw new MetaException("Error parsing partition filter : " + re);
-        }
+      mparts = (List<MPartition>) query.executeWithMap(params);
 
-        String jdoFilter = parser.tree.generateJDOFilter(table, params);
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
+  }
 
-        if( jdoFilter.trim().length() > 0 ) {
-          queryBuilder.append(" && ( ");
-          queryBuilder.append(jdoFilter.trim());
-          queryBuilder.append(" )");
-        }
+  @Override
+  public List<String> listPartitionNamesByFilter(String dbName, String tableName,
+      String filter, short maxParts) throws MetaException {
+    boolean success = false;
+    List<String> partNames = new ArrayList<String>();
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitionsByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        // To be consistent with the behavior of listPartitionNames, if the
+        // table or db does not exist, we return an empty list
+        return partNames;
       }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      Query query = pm.newQuery(MPartition.class,
-          queryBuilder.toString());
+      Query query = pm.newQuery(
+          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where " + queryFilterString);
 
       if( maxParts >= 0 ) {
         //User specified a row limit, set it on the Query
         query.setRange(0, maxParts);
       }
 
-      //Create the parameter declaration string
-      StringBuilder paramDecl = new StringBuilder(
-          "java.lang.String t1, java.lang.String t2");
-      for(String key : params.keySet() ) {
-        paramDecl.append(", java.lang.String  " + key);
-      }
-
       LOG.debug("Filter specified is " + filter + "," +
-             " JDOQL filter is " + queryBuilder.toString());
+          " JDOQL filter is " + queryFilterString);
+      LOG.debug("Params is " + params);
 
       params.put("t1", tableName.trim());
       params.put("t2", dbName.trim());
 
-      query.declareParameters(paramDecl.toString());
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
       query.setOrdering("partitionName ascending");
+      query.setResult("partitionName");
 
-      mparts = (List<MPartition>) query.executeWithMap(params);
+      Collection names = (Collection) query.executeWithMap(params);
+      partNames = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        partNames.add((String) i.next());
+      }
 
-      LOG.debug("Done executing query for listMPartitionsByFilter");
-      pm.retrieveAll(mparts);
+      LOG.debug("Done executing query for listMPartitionNamesByFilter");
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+      LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
     }
-    return mparts;
+    return partNames;
   }
-
   public void alterTable(String dbname, String name, Table newTable)
       throws InvalidObjectException, MetaException {
     boolean success = false;
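
Schematically, for a filter such as ds="2010-10-01" the query assembled above takes the following shape (the filter parameter name is generated by FilterParser and is only illustrative here):

    // Illustrative shape only; the real parameter name comes from the parser.
    // JDOQL filter (makeQueryFilterString):
    //   table.tableName == t1 && table.database.name == t2
    //     && ( partitionName.matches(p0) )
    // Parameter declaration: one "java.lang.String <name>" per params entry,
    // here t1, t2 and p0 (makeParameterDeclarationString).
    // Values are bound with query.executeWithMap(params):
    //   t1 = tableName, t2 = dbName, p0 = escaped-name pattern for ds=2010-10-01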

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Tue Oct 19 18:01:23 2010
@@ -106,6 +106,9 @@ public interface RawStore extends Config
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 
+  public abstract List<String> listPartitionNamesByFilter(String db_name,
+      String tbl_name, String filter, short max_parts) throws MetaException;
+
   public abstract void alterPartition(String db_name, String tbl_name,
       Partition new_part) throws InvalidObjectException, MetaException;
 

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Tue Oct 19 18:01:23 2010
@@ -223,21 +223,40 @@ public class Warehouse {
    * @return string representation of the partition specification.
    * @throws MetaException
    */
-  public static String makePartName(Map<String, String> spec)
+  public static String makePartPath(Map<String, String> spec)
+      throws MetaException {
+    return makePartName(spec, true);
+  }
+
+  /**
+   * Makes a partition name from a specification
+   * @param spec
+   * @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/'
+   * @return
+   * @throws MetaException
+   */
+  public static String makePartName(Map<String, String> spec,
+      boolean addTrailingSeperator)
       throws MetaException {
     StringBuilder suffixBuf = new StringBuilder();
+    int i = 0;
     for (Entry<String, String> e : spec.entrySet()) {
       if (e.getValue() == null || e.getValue().length() == 0) {
         throw new MetaException("Partition spec is incorrect. " + spec);
       }
+      if (i>0) {
+        suffixBuf.append(Path.SEPARATOR);
+      }
       suffixBuf.append(escapePathName(e.getKey()));
       suffixBuf.append('=');
       suffixBuf.append(escapePathName(e.getValue()));
+      i++;
+    }
+    if (addTrailingSeperator) {
       suffixBuf.append(Path.SEPARATOR);
     }
     return suffixBuf.toString();
   }
-
   /**
    * Given a dynamic partition specification, return the path corresponding to the
    * static part of partition specification. This is basically a copy of makePartName
@@ -296,12 +315,12 @@ public class Warehouse {
 
   public Path getPartitionPath(String dbName, String tableName,
       LinkedHashMap<String, String> pm) throws MetaException {
-    return new Path(getDefaultTablePath(dbName, tableName), makePartName(pm));
+    return new Path(getDefaultTablePath(dbName, tableName), makePartPath(pm));
   }
 
   public Path getPartitionPath(Path tblPath, LinkedHashMap<String, String> pm)
       throws MetaException {
-    return new Path(tblPath, makePartName(pm));
+    return new Path(tblPath, makePartPath(pm));
   }
 
   public boolean isDir(Path f) throws MetaException {
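
The effect of the new flag, with illustrative values:

    Map<String, String> spec = new LinkedHashMap<String, String>();
    spec.put("ds", "2008-07-01");
    spec.put("hr", "14");
    Warehouse.makePartPath(spec);        // "ds=2008-07-01/hr=14/" (trailing '/')
    Warehouse.makePartName(spec, false); // "ds=2008-07-01/hr=14"  (no trailing '/')
    // Both throw MetaException if any value in the spec is null or empty.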

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java Tue Oct 19 18:01:23 2010
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hive.metastore.parser;
 
-import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Stack;
 
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde.Constants;
 
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-
 /**
  * The Class representing the filter as a  binary tree. The tree has TreeNode's
  * at intermediate level and the leaf level nodes are of type LeafNode.
@@ -94,7 +94,7 @@ public class ExpressionTree {
     private TreeNode rhs;
 
     public TreeNode() {
-    } 
+    }
 
     public TreeNode(TreeNode lhs, LogicalOperator andOr, TreeNode rhs) {
       this.lhs = lhs;
@@ -140,22 +140,25 @@ public class ExpressionTree {
     @Override
     public String generateJDOFilter(Table table, Map<String, String> params)
     throws MetaException {
-      int partitionIndex;
-      for(partitionIndex = 0;
-      partitionIndex < table.getPartitionKeys().size();
-      partitionIndex++ ) {
-        if( table.getPartitionKeys().get(partitionIndex).getName().
+
+      int partitionColumnCount = table.getPartitionKeys().size();
+      int partitionColumnIndex;
+      for(partitionColumnIndex = 0;
+      partitionColumnIndex < partitionColumnCount;
+      partitionColumnIndex++ ) {
+        if( table.getPartitionKeys().get(partitionColumnIndex).getName().
             equalsIgnoreCase(keyName)) {
           break;
         }
       }
+      assert (table.getPartitionKeys().size() > 0);
 
-      if( partitionIndex == table.getPartitionKeys().size() ) {
+      if( partitionColumnIndex == table.getPartitionKeys().size() ) {
         throw new MetaException("Specified key <" + keyName +
             "> is not a partitioning key for the table");
       }
 
-      if( ! table.getPartitionKeys().get(partitionIndex).
+      if( ! table.getPartitionKeys().get(partitionColumnIndex).
           getType().equals(Constants.STRING_TYPE_NAME) ) {
         throw new MetaException
         ("Filtering is supported only on partition keys of type string");
@@ -173,17 +176,24 @@ public class ExpressionTree {
               "Value should be on the RHS for LIKE operator : " +
               "Key <" + keyName + ">");
         }
-
-        filter = paramName +
+        else if (operator == Operator.EQUALS) {
+          filter = makeFilterForEquals(keyName, value, paramName, params,
+              partitionColumnIndex, partitionColumnCount);
+        } else {
+          filter = paramName +
           " " + operator.getJdoOp() + " " +
-          " this.values.get(" + partitionIndex + ")";
+          " this.values.get(" + partitionColumnIndex + ")";
+        }
       } else {
-        if( operator == Operator.LIKE ) {
+        if (operator == Operator.LIKE ) {
           //generate this.values.get(i).matches("abc%")
-          filter = " this.values.get(" + partitionIndex + ")."
+          filter = " this.values.get(" + partitionColumnIndex + ")."
               + operator.getJdoOp() + "(" + paramName + ") ";
+        } else if (operator == Operator.EQUALS) {
+          filter = makeFilterForEquals(keyName, value, paramName, params,
+              partitionColumnIndex, partitionColumnCount);
         } else {
-          filter = " this.values.get(" + partitionIndex + ") "
+          filter = " this.values.get(" + partitionColumnIndex + ") "
               + operator.getJdoOp() + " " + paramName;
         }
       }
@@ -192,6 +202,46 @@ public class ExpressionTree {
   }
 
   /**
+   * For equals, we can make the JDO query much faster by filtering based on the
+   * partition name. For a condition like ds="2010-10-01", we can see if there
+   * are any partitions with a name that contains the substring "ds=2010-10-01/"
+   * False matches aren't possible since "=" is escaped for partition names
+   * and the trailing '/' ensures that we won't get a match with ds=2010-10-011
+   *
+   * Two cases to keep in mind: Case with only one partition column (no '/'s)
+   * Case where the partition key column is at the end of the name. (no
+   * trailing '/')
+   *
+   * @param keyName name of the partition col e.g. ds
+   * @param value
+   * @param paramName name of the parameter to use for JDOQL
+   * @param params a map from the parameter name to their values
+   * @return
+   * @throws MetaException
+   */
+  private static String makeFilterForEquals(String keyName, String value,
+      String paramName, Map<String, String> params, int keyPos, int keyCount)
+      throws MetaException {
+    Map<String, String> partKeyToVal = new HashMap<String, String>();
+    partKeyToVal.put(keyName, value);
+    // If a partition has multiple partition keys, we make the assumption that
+    // makePartName with one key will return a substring of the name made
+    // with all the keys.
+    String escapedNameFragment = Warehouse.makePartName(partKeyToVal, false);
+
+    if (keyCount == 1) {
+      // Case where there are no other partition columns
+      params.put(paramName, escapedNameFragment);
+    } else if (keyPos + 1 == keyCount) {
+      // Case where the partition column is at the end of the name. There will
+      // be a leading '/' but no trailing '/'
+      params.put(paramName, ".*/" + escapedNameFragment);
+    } else {
+      params.put(paramName, ".*" + escapedNameFragment + "/.*");
+    }
+    return "partitionName.matches(" + paramName + ")";
+  }
+  /**
    * The root node for the tree.
    */
   private TreeNode root = null;
@@ -250,6 +300,7 @@ public class ExpressionTree {
       super(input);
     }
 
+    @Override
     public int LA (int i) {
       int returnChar = super.LA (i);
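
The three branches of makeFilterForEquals correspond to three partitionName patterns; a worked example, assuming a table partitioned by (ds, hr):

    // keyCount == 1 (table partitioned by ds only):
    //   ds = "2010-10-01"  ->  pattern "ds=2010-10-01" (whole-name match)
    // keyPos + 1 == keyCount (hr is the last of the two keys):
    //   hr = "12"          ->  pattern ".*/hr=12"
    // otherwise (ds is the first of the two keys):
    //   ds = "2010-10-01"  ->  pattern ".*ds=2010-10-01/.*"
    // Every case emits the JDOQL fragment: partitionName.matches(<param>)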
 

Modified: hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Tue Oct 19 18:01:23 2010
@@ -123,6 +123,10 @@ public abstract class TestHiveMetaStore 
       vals3 = new ArrayList<String>(2);
       vals3.add("2008-07-02 14:13:12");
       vals3.add("15");
+      List <String> vals4 = new ArrayList<String>(2);
+      vals4 = new ArrayList<String>(2);
+      vals4.add("2008-07-03 14:13:12");
+      vals4.add("151");
 
       client.dropTable(dbName, tblName);
       silentDropDatabase(dbName);
@@ -163,7 +167,7 @@ public abstract class TestHiveMetaStore 
       tbl.getPartitionKeys().add(
           new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
       tbl.getPartitionKeys().add(
-          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+          new FieldSchema("hr", Constants.STRING_TYPE_NAME, ""));
 
       client.createTable(tbl);
 
@@ -200,7 +204,16 @@ public abstract class TestHiveMetaStore 
       part3.setParameters(new HashMap<String, String>());
       part3.setSd(tbl.getSd());
       part3.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
-      part3.getSd().setLocation(tbl.getSd().getLocation() + "/part2");
+      part3.getSd().setLocation(tbl.getSd().getLocation() + "/part3");
+
+      Partition part4 = new Partition();
+      part4.setDbName(dbName);
+      part4.setTableName(tblName);
+      part4.setValues(vals4);
+      part4.setParameters(new HashMap<String, String>());
+      part4.setSd(tbl.getSd());
+      part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part4.getSd().setLocation(tbl.getSd().getLocation() + "/part4");
 
       // check if the partition exists (it shouldn't)
       boolean exceptionThrown = false;
@@ -218,6 +231,8 @@ public abstract class TestHiveMetaStore 
       assertNotNull("Unable to create partition " + part2, retp2);
       Partition retp3 = client.add_partition(part3);
       assertNotNull("Unable to create partition " + part3, retp3);
+      Partition retp4 = client.add_partition(part4);
+      assertNotNull("Unable to create partition " + part4, retp4);
 
       Partition part_get = client.getPartition(dbName, tblName, part.getValues());
       if(isThriftClient) {
@@ -268,7 +283,7 @@ public abstract class TestHiveMetaStore 
       partialVals.add(vals2.get(1));
 
       partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
-      assertTrue("Should have returned 2 partitions", partial.size() == 2);
+      assertEquals("Should have returned 2 partitions", 2, partial.size());
       assertTrue("Not all parts returned", partial.containsAll(parts));
 
       partNames.clear();
@@ -276,7 +291,7 @@ public abstract class TestHiveMetaStore 
       partNames.add(part3Name);
       partialNames = client.listPartitionNames(dbName, tblName, partialVals,
           (short) -1);
-      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+      assertEquals("Should have returned 2 partition names", 2, partialNames.size());
       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
 
       // Verify escaped partition names don't return partitions
@@ -299,15 +314,15 @@ public abstract class TestHiveMetaStore 
 
       // Test append_partition_by_name
       client.appendPartition(dbName, tblName, partName);
-      Partition part4 = client.getPartition(dbName, tblName, part.getValues());
-      assertTrue("Append partition by name failed", part4.getValues().equals(vals));;
-      Path part4Path = new Path(part4.getSd().getLocation());
-      assertTrue(fs.exists(part4Path));
+      Partition part5 = client.getPartition(dbName, tblName, part.getValues());
+      assertTrue("Append partition by name failed", part5.getValues().equals(vals));
+      Path part5Path = new Path(part5.getSd().getLocation());
+      assertTrue(fs.exists(part5Path));
 
       // Test drop_partition_by_name
       assertTrue("Drop partition by name failed",
           client.dropPartition(dbName, tblName, partName, true));
-      assertFalse(fs.exists(part4Path));
+      assertFalse(fs.exists(part5Path));
 
       // add the partition again so that drop table with a partition can be
       // tested
@@ -1062,9 +1077,9 @@ public abstract class TestHiveMetaStore 
          "(p1=\"p13\" aNd p2=\"p24\")", 4);
       //test for and or precedence
       checkFilter(client, dbName, tblName,
-         "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1); 
+         "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
       checkFilter(client, dbName, tblName,
-         "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2); 
+         "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
 
       checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
       checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
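
The two precedence checks above hinge on "and" binding tighter than "Or": with parentheses, p1="p12" and (p2="p27" Or p2="p21") matches a single partition, while the unparenthesized form parses as (p1="p12" and p2="p27") Or p2="p21" and so matches two, which is what the expected counts 1 and 2 verify.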

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java Tue Oct 19 18:01:23 2010
@@ -299,7 +299,7 @@ public class StatsTask extends Task<Stat
           PartitionStatistics newPartStats = new PartitionStatistics();
 
           // In that case of a partition, the key for stats temporary store is "rootDir/[dynamic_partition_specs/]%"
-          String partitionID = work.getAggKey() + Warehouse.makePartName(partn.getSpec());
+          String partitionID = work.getAggKey() + Warehouse.makePartPath(partn.getSpec());
 
           String rows = statsAggregator.aggregateStats(partitionID, StatsSetupConst.ROW_COUNT);
           if (rows != null) {
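
Since makePartPath preserves the trailing separator that makePartName used to append, the stats key format is unchanged; with an illustrative aggKey of "file:/warehouse/t/" and spec {ds=2008-04-08, hr=11}, partitionID would be "file:/warehouse/t/ds=2008-04-08/hr=11/".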

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java Tue Oct 19 18:01:23 2010
@@ -38,7 +38,7 @@ public class IndexMetadataChangeTask ext
 
   @Override
   protected int execute(DriverContext driverContext) {
-    
+
     try {
       Hive db = Hive.get(conf);
       IndexMetadataChangeWork work = this.getWork();
@@ -58,19 +58,20 @@ public class IndexMetadataChangeTask ext
         console.printError("Index table is partitioned, but no partition specified.");
         return 1;
       }
-      
+
       if (work.getPartSpec() != null) {
         Partition part = db.getPartition(tbl, work.getPartSpec(), false);
         if (part == null) {
-          console.printError("Partition " + Warehouse.makePartName(work.getPartSpec()).toString()
+          console.printError("Partition " +
+              Warehouse.makePartName(work.getPartSpec(), false).toString()
               + " does not exist.");
           return 1;
         }
-        
+
         Path url = new Path(part.getDataLocation().toString());
         FileSystem fs = url.getFileSystem(conf);
         FileStatus fstat = fs.getFileStatus(url);
-        
+
         part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
         db.alterPartition(tbl.getTableName(), part);
       } else {
@@ -93,7 +94,7 @@ public class IndexMetadataChangeTask ext
   public String getName() {
     return "IndexMetadataChangeTask";
   }
-  
+
   @Override
   public int getType() {
     return StageType.DDL;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Tue Oct 19 18:01:23 2010
@@ -912,9 +912,9 @@ public class Hive {
       if(oldPart != null) {
         oldPartPath = oldPart.getPartitionPath();
       }
-      
+
       Path partPath = new Path(tbl.getDataLocation().getPath(),
-          Warehouse.makePartName(partSpec));
+          Warehouse.makePartPath(partSpec));
 
       Path newPartPath = new Path(loadPath.toUri().getScheme(), loadPath
           .toUri().getAuthority(), partPath.toUri().getPath());
@@ -1085,7 +1085,7 @@ public class Hive {
 
     return new Partition(tbl, partition);
   }
-  
+
   public Partition getPartition(Table tbl, Map<String, String> partSpec,
       boolean forceCreate) throws HiveException {
     return getPartition(tbl, partSpec, forceCreate, null);
@@ -1448,7 +1448,7 @@ public class Hive {
   /**
    * Replaces files in the partition with new data set specifed by srcf. Works
    * by moving files.
-   * srcf, destf, and tmppath should resident in the same dfs, but the oldPath can be in a 
+   * srcf, destf, and tmppath should resident in the same dfs, but the oldPath can be in a
    * different dfs.
    *
    * @param srcf
@@ -1462,7 +1462,7 @@ public class Hive {
    */
   static protected void replaceFiles(Path srcf, Path destf, Path oldPath,
       Path tmppath, Configuration conf) throws HiveException {
-    
+
     FileSystem fs = null;
     FsShell fshell = new FsShell();
     fshell.setConf(conf);

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Tue Oct 19 18:01:23 2010
@@ -42,7 +42,7 @@ public class HiveMetaStoreChecker {
   /**
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
-   * 
+   *
    * @param dbName
    *          name of the database, if not specified the default will be used.
    * @param tableName
@@ -95,7 +95,7 @@ public class HiveMetaStoreChecker {
 
   /**
    * Check for table directories that aren't in the metastore.
-   * 
+   *
    * @param dbName
    *          Name of the database
    * @param tables
@@ -144,7 +144,7 @@ public class HiveMetaStoreChecker {
   /**
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
-   * 
+   *
    * @param dbName
    *          Name of the database
    * @param tableName
@@ -189,7 +189,7 @@ public class HiveMetaStoreChecker {
           if (part == null) {
             PartitionResult pr = new PartitionResult();
             pr.setTableName(tableName);
-            pr.setPartitionName(Warehouse.makePartName(map));
+            pr.setPartitionName(Warehouse.makePartPath(map));
             result.getPartitionsNotInMs().add(pr);
           } else {
             parts.add(part);
@@ -204,7 +204,7 @@ public class HiveMetaStoreChecker {
   /**
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
-   * 
+   *
    * @param table
    *          Table to check
    * @param parts
@@ -259,7 +259,7 @@ public class HiveMetaStoreChecker {
 
   /**
    * Find partitions on the fs that are unknown to the metastore.
-   * 
+   *
    * @param table
    *          Table where the partitions would be located
    * @param partPaths
@@ -301,7 +301,7 @@ public class HiveMetaStoreChecker {
 
   /**
    * Get the partition name from the path.
-   * 
+   *
    * @param tablePath
    *          Path of the table.
    * @param partitionPath
@@ -326,9 +326,9 @@ public class HiveMetaStoreChecker {
   /**
    * Recursive method to get the leaf directories of a base path. Example:
    * base/dir1/dir2 base/dir3
-   * 
+   *
    * This will return dir2 and dir3 but not dir1.
-   * 
+   *
    * @param basePath
    *          Start directory
    * @param allDirs

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Tue Oct 19 18:01:23 2010
@@ -423,7 +423,7 @@ public class Partition implements Serial
   public String toString() {
     String pn = "Invalid Partition";
     try {
-      pn = Warehouse.makePartName(getSpec());
+      pn = Warehouse.makePartName(getSpec(), false);
     } catch (MetaException e) {
       // ignore as we most probably in an exception path already otherwise this
       // error wouldn't occur

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Oct 19 18:01:23 2010
@@ -448,7 +448,8 @@ public class DDLSemanticAnalyzer extends
       Partition part = db.getPartition(baseTbl, partSpec, false);
       if (part == null) {
         throw new HiveException("Partition "
-            + Warehouse.makePartName(partSpec) + " does not exist in table "
+            + Warehouse.makePartName(partSpec, false)
+            + " does not exist in table "
             + baseTbl.getTableName());
       }
       baseTblPartitions.add(part);

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1024341&r1=1024340&r2=1024341&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Tue Oct 19 18:01:23 2010
@@ -3637,7 +3637,7 @@ public class SemanticAnalyzer extends Ba
 
     if (dest_part != null) {
       try {
-        String staticSpec = Warehouse.makePartName(dest_part.getSpec());
+        String staticSpec = Warehouse.makePartPath(dest_part.getSpec());
         fileSinkDesc.setStaticSpec(staticSpec);
       } catch (MetaException e) {
         throw new SemanticException(e);
@@ -7148,7 +7148,7 @@ public class SemanticAnalyzer extends Ba
     for (ExecDriver mrtask: mrtasks) {
       try {
         ContentSummary inputSummary = Utilities.getInputSummary
-          (ctx, (MapredWork)mrtask.getWork(), p);
+          (ctx, mrtask.getWork(), p);
         int numReducers = getNumberOfReducers(mrtask.getWork(), conf);
 
         if (LOG.isDebugEnabled()) {