Posted to commits@hive.apache.org by pa...@apache.org on 2011/01/20 04:47:39 UTC

svn commit: r1061112 - in /hive/trunk: ./ metastore/src/java/org/apache/hadoop/hive/metastore/ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ metastore/src/test/org/apache/hadoop/hive/metastore/

Author: pauly
Date: Thu Jan 20 03:47:39 2011
New Revision: 1061112

URL: http://svn.apache.org/viewvc?rev=1061112&view=rev
Log:
HIVE-1862 Revive partition filtering in the Hive MetaStore
(Mac Yang via pauly)
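
This change restores the filter-based partition listing that HIVE-1609
introduced and that had been temporarily disabled pending HIVE-1853. As a
minimal client-side sketch of the revived API, assuming a connected
HiveMetaStoreClient and the database/table names used by the tests below:

    // List at most 10 partitions of filterdb.filtertbl whose string
    // partition key p1 equals "p11"; pass (short) -1 for no limit.
    List<Partition> parts = client.listPartitionsByFilter(
        "filterdb", "filtertbl", "p1 = \"p11\"", (short) 10);

Filtering is supported only on partition keys of type string; the filter
grammar exercised by the tests covers =, <>, <, <=, >, >=, LIKE, AND and OR.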


Modified:
    hive/trunk/CHANGES.txt
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
    hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java

Modified: hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hive/trunk/CHANGES.txt?rev=1061112&r1=1061111&r2=1061112&view=diff
==============================================================================
--- hive/trunk/CHANGES.txt (original)
+++ hive/trunk/CHANGES.txt Thu Jan 20 03:47:39 2011
@@ -146,6 +146,9 @@ Trunk -  Unreleased
     HIVE-1696  Add delegation token support to metastore
     (Devaraj Das via namit)
 
+    HIVE-1862 Revive partition filtering in the Hive MetaStore
+    (Mac Yang via pauly)
+
   IMPROVEMENTS
 
     HIVE-1692. FetchOperator.getInputFormatFromCache hides causal exception (Philip Zeyliger via cws)

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1061112&r1=1061111&r2=1061112&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Jan 20 03:47:39 2011
@@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.metrics.Metrics;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
@@ -191,6 +192,15 @@ public class HiveMetaStore extends Thrif
       updateConnectionURL(hiveConf, null);
 
       createDefaultDB();
+
+      if (hiveConf.getBoolean("hive.metastore.metrics.enabled", false)){
+        try {
+          Metrics.init();
+        } catch (Exception e) {
+          // log exception, but ignore inability to start
+          LOG.error("Exception while instantiating metrics system: " +e);
+        }
+      }
       return true;
     }
 
@@ -437,6 +447,39 @@ public class HiveMetaStore extends Thrif
           + "[" + join(partVals, ",") + "]");
     }
 
+
+    public String startFunction(String function, String extraLogInfo){
+      incrementCounter(function);
+      logStartFunction(function + extraLogInfo);
+      try {
+        Metrics.startScope(function);
+      } catch (IOException e) {
+        LOG.debug("Exception when starting metrics scope" + e);
+      }
+      return function;
+    }
+
+    public String startFunction(String function){
+      return startFunction(function,"");
+    }
+
+    public String startTableFunction(String function, String db, String tbl){
+      return startFunction(function, " : db=" + db + " tbl=" + tbl);
+    }
+
+    public String startPartitionFunction(String function, String db, String tbl, List<String> partVals){
+      return startFunction(function, " : db=" + db + " tbl=" + tbl
+          + "[" + join(partVals, ",") + "]" );
+    }
+
+    public void endFunction(String function){
+      try {
+        Metrics.endScope(function);
+      } catch (IOException e) {
+        LOG.debug("Exception when closing metrics scope" + e);
+      }
+    }
+
     @Override
     public fb_status getStatus() {
       return fb_status.ALIVE;
@@ -476,8 +519,7 @@ public class HiveMetaStore extends Thrif
 
     public void create_database(final Database db)
         throws AlreadyExistsException, InvalidObjectException, MetaException {
-      incrementCounter("create_database");
-      logStartFunction("create_database: "
+      startFunction("create_database",": "
                        + db.getName() + " "
                        + db.getLocationUri() + " "
                        + db.getDescription());
@@ -505,14 +547,14 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("create_database");
       }
     }
 
     public Database get_database(final String name) throws NoSuchObjectException,
         MetaException {
-      incrementCounter("get_database");
-      logStartFunction("get_database: " + name);
-
+      startFunction("get_database",": " + name);
       Database db = null;
       try {
         db = executeWithRetry(new Command<Database>() {
@@ -528,6 +570,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_database");
       }
       return db;
     }
@@ -581,9 +625,10 @@ public class HiveMetaStore extends Thrif
 
     public void drop_database(final String dbName, final boolean deleteData)
         throws NoSuchObjectException, InvalidOperationException, MetaException {
-      incrementCounter("drop_database");
-      logStartFunction("drop_database: " + dbName);
+
+      startFunction("drop_database",": " + dbName);
       if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) {
+        endFunction("drop_database");
         throw new MetaException("Can not drop default database");
       }
 
@@ -604,12 +649,13 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_database");
       }
     }
 
     public List<String> get_databases(final String pattern) throws MetaException {
-      incrementCounter("get_databases");
-      logStartFunction("get_databases: " + pattern);
+      startFunction("get_databases",": " + pattern);
 
       List<String> ret = null;
       try {
@@ -624,13 +670,14 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_databases");
       }
       return ret;
     }
 
     public List<String> get_all_databases() throws MetaException {
-      incrementCounter("get_all_databases");
-      logStartFunction("get_all_databases");
+      startFunction("get_all_databases");
 
       List<String> ret = null;
       try {
@@ -645,6 +692,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_all_databases");
       }
       return ret;
     }
@@ -672,8 +721,7 @@ public class HiveMetaStore extends Thrif
 
     public boolean create_type(final Type type) throws AlreadyExistsException,
         MetaException, InvalidObjectException {
-      incrementCounter("create_type");
-      logStartFunction("create_type: " + type.getName());
+      startFunction("create_type",": " + type.getName());
       Boolean ret = null;
       try {
         ret = executeWithRetry(new Command<Boolean>() {
@@ -692,14 +740,15 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("create_type");
       }
 
       return ret.booleanValue();
     }
 
     public Type get_type(final String name) throws MetaException, NoSuchObjectException {
-      incrementCounter("get_type");
-      logStartFunction("get_type: " + name);
+      startFunction("get_type",": " + name);
 
       Type ret;
       try {
@@ -720,6 +769,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_type");
       }
       return ret;
     }
@@ -751,8 +802,7 @@ public class HiveMetaStore extends Thrif
 
 
     public boolean drop_type(final String name) throws MetaException {
-      incrementCounter("drop_type");
-      logStartFunction("drop_type: " + name);
+      startFunction("drop_type",": " + name);
 
       Boolean ret = null;
       try {
@@ -768,14 +818,16 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_type");
       }
       return ret;
     }
 
     public Map<String, Type> get_type_all(String name) throws MetaException {
-      incrementCounter("get_type_all");
       // TODO Auto-generated method stub
-      logStartFunction("get_type_all: " + name);
+      startFunction("get_type_all",": " + name);
+      endFunction("get_type_all");
       throw new MetaException("Not yet implemented");
     }
 
@@ -848,8 +900,7 @@ public class HiveMetaStore extends Thrif
 
     public void create_table(final Table tbl) throws AlreadyExistsException,
         MetaException, InvalidObjectException {
-      incrementCounter("create_table");
-      logStartFunction("create_table: db=" + tbl.getDbName() + " tbl="
+      startFunction("create_table",": db=" + tbl.getDbName() + " tbl="
           + tbl.getTableName());
       try {
         executeWithRetry(new Command<Boolean>() {
@@ -868,6 +919,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("create_table");
       }
     }
 
@@ -938,8 +991,7 @@ public class HiveMetaStore extends Thrif
 
     public void drop_table(final String dbname, final String name, final boolean deleteData)
         throws NoSuchObjectException, MetaException {
-      incrementCounter("drop_table");
-      logStartTableFunction("drop_table", dbname, name);
+      startTableFunction("drop_table", dbname, name);
 
       try {
         executeWithRetry(new Command<Boolean>() {
@@ -956,6 +1008,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_table");
       }
 
     }
@@ -978,8 +1032,7 @@ public class HiveMetaStore extends Thrif
     public Table get_table(final String dbname, final String name) throws MetaException,
         NoSuchObjectException {
       Table t = null;
-      incrementCounter("get_table");
-      logStartTableFunction("get_table", dbname, name);
+      startTableFunction("get_table", dbname, name);
       try {
         t = executeWithRetry(new Command<Table>() {
           @Override
@@ -999,14 +1052,15 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_table");
       }
       return t;
     }
 
     public boolean set_table_parameters(String dbname, String name,
         Map<String, String> params) throws NoSuchObjectException, MetaException {
-      incrementCounter("set_table_parameters");
-      logStartTableFunction("set_table_parameters", dbname, name);
+      endFunction(startTableFunction("set_table_parameters", dbname, name));
       // TODO Auto-generated method stub
       return false;
     }
@@ -1079,8 +1133,7 @@ public class HiveMetaStore extends Thrif
     public Partition append_partition(final String dbName, final String tableName,
         final List<String> part_vals) throws InvalidObjectException,
         AlreadyExistsException, MetaException {
-      incrementCounter("append_partition");
-      logStartPartitionFunction("append_partition", dbName, tableName, part_vals);
+      startPartitionFunction("append_partition", dbName, tableName, part_vals);
       if (LOG.isDebugEnabled()) {
         for (String part : part_vals) {
           LOG.debug(part);
@@ -1104,6 +1157,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("append_partition");
       }
       return ret;
     }
@@ -1131,7 +1186,7 @@ public class HiveMetaStore extends Thrif
 
     public int add_partitions(final List<Partition> parts) throws MetaException,
         InvalidObjectException, AlreadyExistsException {
-      incrementCounter("add_partition");
+      startFunction("add_partition");
       if (parts.size() == 0) {
         return 0;
       }
@@ -1154,6 +1209,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("add_partition");
       }
       return ret;
     }
@@ -1226,8 +1283,7 @@ public class HiveMetaStore extends Thrif
 
     public Partition add_partition(final Partition part)
         throws InvalidObjectException, AlreadyExistsException, MetaException {
-      incrementCounter("add_partition");
-      logStartTableFunction("add_partition", part.getDbName(), part.getTableName());
+      startTableFunction("add_partition", part.getDbName(), part.getTableName());
 
       Partition ret = null;
       try {
@@ -1246,6 +1302,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("add_partition");
       }
       return ret;
 
@@ -1307,8 +1365,7 @@ public class HiveMetaStore extends Thrif
     public boolean drop_partition(final String db_name, final String tbl_name,
         final List<String> part_vals, final boolean deleteData)
         throws NoSuchObjectException, MetaException, TException {
-      incrementCounter("drop_partition");
-      logStartPartitionFunction("drop_partition", db_name, tbl_name, part_vals);
+      startPartitionFunction("drop_partition", db_name, tbl_name, part_vals);
       LOG.info("Partition values:" + part_vals);
 
       Boolean ret = null;
@@ -1329,6 +1386,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_partition");
       }
       return ret.booleanValue();
 
@@ -1336,8 +1395,7 @@ public class HiveMetaStore extends Thrif
 
     public Partition get_partition(final String db_name, final String tbl_name,
         final List<String> part_vals) throws MetaException, NoSuchObjectException {
-      incrementCounter("get_partition");
-      logStartPartitionFunction("get_partition", db_name, tbl_name, part_vals);
+      startPartitionFunction("get_partition", db_name, tbl_name, part_vals);
 
       Partition ret = null;
       try {
@@ -1354,6 +1412,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_partition");
       }
       return ret;
     }
@@ -1389,8 +1449,7 @@ public class HiveMetaStore extends Thrif
 
     public List<Partition> get_partitions(final String db_name, final String tbl_name,
         final short max_parts) throws NoSuchObjectException, MetaException {
-      incrementCounter("get_partitions");
-      logStartTableFunction("get_partitions", db_name, tbl_name);
+      startTableFunction("get_partitions", db_name, tbl_name);
 
       List<Partition> ret = null;
       try {
@@ -1407,6 +1466,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_partitions");
       }
       return ret;
 
@@ -1443,8 +1504,7 @@ public class HiveMetaStore extends Thrif
 
     public List<String> get_partition_names(final String db_name, final String tbl_name,
         final short max_parts) throws MetaException {
-      incrementCounter("get_partition_names");
-      logStartTableFunction("get_partition_names", db_name, tbl_name);
+      startTableFunction("get_partition_names", db_name, tbl_name);
 
       List<String> ret = null;
       try {
@@ -1459,6 +1519,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_partition_names");
       }
       return ret;
     }
@@ -1483,8 +1545,7 @@ public class HiveMetaStore extends Thrif
     public void alter_partition(final String db_name, final String tbl_name,
         final Partition new_part) throws InvalidOperationException, MetaException,
         TException {
-      incrementCounter("alter_partition");
-      logStartTableFunction("alter_partition", db_name, tbl_name);
+      startTableFunction("alter_partition", db_name, tbl_name);
       LOG.info("Partition values:" + new_part.getValues());
 
       try {
@@ -1504,21 +1565,22 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("alter_partition");
       }
       return;
     }
 
     public boolean create_index(Index index_def)
         throws IndexAlreadyExistsException, MetaException {
-      incrementCounter("create_index");
+      endFunction(startFunction("create_index"));
       // TODO Auto-generated method stub
       throw new MetaException("Not yet implemented");
     }
 
     public void alter_index(final String dbname, final String base_table_name, final String index_name, final Index newIndex)
         throws InvalidOperationException, MetaException {
-      incrementCounter("alter_index");
-      logStartFunction("alter_index: db=" + dbname + " base_tbl=" + base_table_name
+      startFunction("alter_index", ": db=" + dbname + " base_tbl=" + base_table_name
           + " idx=" + index_name + " newidx=" + newIndex.getIndexName());
       newIndex.putToParameters(Constants.DDL_TIME, Long.toString(System
           .currentTimeMillis() / 1000));
@@ -1538,20 +1600,20 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("alter_index");
       }
       return;
     }
 
     public String getVersion() throws TException {
-      incrementCounter("getVersion");
-      logStartFunction("getVersion");
+      endFunction(startFunction("getVersion"));
       return "3.0";
     }
 
     public void alter_table(final String dbname, final String name, final Table newTable)
         throws InvalidOperationException, MetaException {
-      incrementCounter("alter_table");
-      logStartFunction("alter_table: db=" + dbname + " tbl=" + name
+      startFunction("alter_table",": db=" + dbname + " tbl=" + name
           + " newtbl=" + newTable.getTableName());
 
       // Update the time if it hasn't been specified.
@@ -1561,6 +1623,7 @@ public class HiveMetaStore extends Thrif
             .currentTimeMillis() / 1000));
       }
 
+
       try {
         executeWithRetry(new Command<Boolean>() {
           @Override
@@ -1576,13 +1639,14 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("alter_table");
       }
     }
 
     public List<String> get_tables(final String dbname, final String pattern)
         throws MetaException {
-      incrementCounter("get_tables");
-      logStartFunction("get_tables: db=" + dbname + " pat=" + pattern);
+      startFunction("get_tables",": db=" + dbname + " pat=" + pattern);
 
       List<String> ret;
       try {
@@ -1597,13 +1661,14 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_tables");
       }
       return ret;
     }
 
     public List<String> get_all_tables(final String dbname) throws MetaException {
-      incrementCounter("get_all_tables");
-      logStartFunction("get_all_tables: db=" + dbname);
+      startFunction("get_all_tables",": db=" + dbname);
 
       List<String> ret;
       try {
@@ -1618,35 +1683,40 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_all_tables");
       }
       return ret;
     }
 
     public List<FieldSchema> get_fields(String db, String tableName)
         throws MetaException, UnknownTableException, UnknownDBException {
-      incrementCounter("get_fields");
-      logStartFunction("get_fields: db=" + db + "tbl=" + tableName);
+      startFunction("get_fields",": db=" + db + "tbl=" + tableName);
       String[] names = tableName.split("\\.");
       String base_table_name = names[0];
 
       Table tbl;
       try {
-        tbl = get_table(db, base_table_name);
-      } catch (NoSuchObjectException e) {
-        throw new UnknownTableException(e.getMessage());
-      }
-      boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
-        tbl.getSd().getSerdeInfo().getSerializationLib());
-      if (!getColsFromSerDe) {
-        return tbl.getSd().getCols();
-      } else {
         try {
-          Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
-          return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
-        } catch (SerDeException e) {
-          StringUtils.stringifyException(e);
-          throw new MetaException(e.getMessage());
+          tbl = get_table(db, base_table_name);
+        } catch (NoSuchObjectException e) {
+          throw new UnknownTableException(e.getMessage());
+        }
+        boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
+            tbl.getSd().getSerdeInfo().getSerializationLib());
+        if (!getColsFromSerDe) {
+          return tbl.getSd().getCols();
+        } else {
+          try {
+            Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
+            return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
+          } catch (SerDeException e) {
+            StringUtils.stringifyException(e);
+            throw new MetaException(e.getMessage());
+          }
         }
+      } finally {
+        endFunction("get_fields");
       }
     }
 
@@ -1665,29 +1735,32 @@ public class HiveMetaStore extends Thrif
      */
     public List<FieldSchema> get_schema(String db, String tableName)
         throws MetaException, UnknownTableException, UnknownDBException {
-      incrementCounter("get_schema");
-      logStartFunction("get_schema: db=" + db + "tbl=" + tableName);
-      String[] names = tableName.split("\\.");
-      String base_table_name = names[0];
-
-      Table tbl;
+      startFunction("get_schema",": db=" + db + "tbl=" + tableName);
       try {
-        tbl = get_table(db, base_table_name);
-      } catch (NoSuchObjectException e) {
-        throw new UnknownTableException(e.getMessage());
-      }
-      List<FieldSchema> fieldSchemas = get_fields(db, base_table_name);
+        String[] names = tableName.split("\\.");
+        String base_table_name = names[0];
 
-      if (tbl == null || fieldSchemas == null) {
-        throw new UnknownTableException(tableName + " doesn't exist");
-      }
+        Table tbl;
+        try {
+          tbl = get_table(db, base_table_name);
+        } catch (NoSuchObjectException e) {
+          throw new UnknownTableException(e.getMessage());
+        }
+        List<FieldSchema> fieldSchemas = get_fields(db, base_table_name);
 
-      if (tbl.getPartitionKeys() != null) {
-        // Combine the column field schemas and the partition keys to create the
-        // whole schema
-        fieldSchemas.addAll(tbl.getPartitionKeys());
+        if (tbl == null || fieldSchemas == null) {
+          throw new UnknownTableException(tableName + " doesn't exist");
+        }
+
+        if (tbl.getPartitionKeys() != null) {
+          // Combine the column field schemas and the partition keys to create the
+          // whole schema
+          fieldSchemas.addAll(tbl.getPartitionKeys());
+        }
+        return fieldSchemas;
+      } finally {
+        endFunction("get_schema");
       }
-      return fieldSchemas;
     }
 
     public String getCpuProfile(int profileDurationInSec) throws TException {
@@ -1702,28 +1775,31 @@ public class HiveMetaStore extends Thrif
      */
     public String get_config_value(String name, String defaultValue)
         throws TException, ConfigValSecurityException {
-      incrementCounter("get_config_value");
-      logStartFunction("get_config_value: name=" + name + " defaultValue="
+      startFunction("get_config_value",": name=" + name + " defaultValue="
           + defaultValue);
-      if (name == null) {
-        return defaultValue;
-      }
-      // Allow only keys that start with hive.*, hdfs.*, mapred.* for security
-      // i.e. don't allow access to db password
-      if (!Pattern.matches("(hive|hdfs|mapred).*", name)) {
-        throw new ConfigValSecurityException("For security reasons, the "
-            + "config key " + name + " cannot be accessed");
-      }
-
-      String toReturn = defaultValue;
       try {
-        toReturn = hiveConf.get(name, defaultValue);
-      } catch (RuntimeException e) {
-        LOG.error(threadLocalId.get().toString() + ": "
-            + "RuntimeException thrown in get_config_value - msg: "
-            + e.getMessage() + " cause: " + e.getCause());
+        if (name == null) {
+          return defaultValue;
+        }
+        // Allow only keys that start with hive.*, hdfs.*, mapred.* for security
+        // i.e. don't allow access to db password
+        if (!Pattern.matches("(hive|hdfs|mapred).*", name)) {
+          throw new ConfigValSecurityException("For security reasons, the "
+              + "config key " + name + " cannot be accessed");
+        }
+
+        String toReturn = defaultValue;
+        try {
+          toReturn = hiveConf.get(name, defaultValue);
+        } catch (RuntimeException e) {
+          LOG.error(threadLocalId.get().toString() + ": "
+              + "RuntimeException thrown in get_config_value - msg: "
+              + e.getMessage() + " cause: " + e.getCause());
+        }
+        return toReturn;
+      } finally {
+        endFunction("get_config_value");
       }
-      return toReturn;
     }
 
     private List<String> getPartValsFromName(RawStore ms, String dbName, String tblName,
@@ -1772,8 +1848,7 @@ public class HiveMetaStore extends Thrif
     public Partition get_partition_by_name(final String db_name,final String tbl_name,
         final String part_name) throws MetaException, NoSuchObjectException, TException {
 
-      incrementCounter("get_partition_by_name");
-      logStartFunction("get_partition_by_name: db=" + db_name + " tbl="
+      startFunction("get_partition_by_name",": db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
 
       Partition ret = null;
@@ -1794,6 +1869,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_partition_by_name");
       }
       return ret;
     }
@@ -1801,8 +1878,7 @@ public class HiveMetaStore extends Thrif
     public Partition append_partition_by_name(final String db_name, final String tbl_name,
         final String part_name) throws InvalidObjectException,
         AlreadyExistsException, MetaException, TException {
-      incrementCounter("append_partition_by_name");
-      logStartFunction("append_partition_by_name: db=" + db_name + " tbl="
+      startFunction("append_partition_by_name",": db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
 
       Partition ret = null;
@@ -1825,6 +1901,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("append_partition_by_name");
       }
       return ret;
     }
@@ -1848,8 +1926,7 @@ public class HiveMetaStore extends Thrif
     public boolean drop_partition_by_name(final String db_name, final String tbl_name,
         final String part_name, final boolean deleteData) throws NoSuchObjectException,
         MetaException, TException {
-      incrementCounter("drop_partition_by_name");
-      logStartFunction("drop_partition_by_name: db=" + db_name + " tbl="
+      startFunction("drop_partition_by_name",": db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
 
       Boolean ret = null;
@@ -1870,6 +1947,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_partition_by_name");
       }
 
       return ret.booleanValue();
@@ -1974,8 +2053,7 @@ public class HiveMetaStore extends Thrif
     @Override
     public Index add_index(final Index newIndex, final Table indexTable) throws InvalidObjectException,
         AlreadyExistsException, MetaException, TException {
-      incrementCounter("add_partition");
-      logStartFunction("add_index: db=" + newIndex.getDbName() + " tbl="
+      startFunction("add_index",": db=" + newIndex.getDbName() + " tbl="
           + newIndex.getOrigTableName() + " index=" + newIndex.getIndexName());
       Index ret = null;
       try {
@@ -1994,6 +2072,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("add_index");
       }
       return ret;
     }
@@ -2059,8 +2139,7 @@ public class HiveMetaStore extends Thrif
     public boolean drop_index_by_name(final String dbName, final String tblName,
         final String indexName, final boolean deleteData) throws NoSuchObjectException,
         MetaException, TException {
-      incrementCounter("drop_index_by_name");
-      logStartFunction("drop_index_by_name: db=" + dbName + " tbl="
+      startFunction("drop_index_by_name",": db=" + dbName + " tbl="
           + tblName + " index=" + indexName);
 
       Boolean ret = null;
@@ -2081,6 +2160,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_index_by_name");
       }
 
       return ret.booleanValue();
@@ -2137,8 +2218,7 @@ public class HiveMetaStore extends Thrif
         final String indexName) throws MetaException, NoSuchObjectException,
         TException {
 
-      incrementCounter("get_index_by_name");
-      logStartFunction("get_index_by_name: db=" + dbName + " tbl="
+      startFunction("get_index_by_name",": db=" + dbName + " tbl="
           + tblName + " index=" + indexName);
 
       Index ret = null;
@@ -2159,6 +2239,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("drop_index_by_name");
       }
       return ret;
     }
@@ -2178,8 +2260,7 @@ public class HiveMetaStore extends Thrif
     @Override
     public List<String> get_index_names(final String dbName, final String tblName,
         final short maxIndexes) throws MetaException, TException {
-      incrementCounter("get_index_names");
-      logStartTableFunction("get_index_names", dbName, tblName);
+      startTableFunction("get_index_names", dbName, tblName);
 
       List<String> ret = null;
       try {
@@ -2194,6 +2275,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_index_names");
       }
       return ret;
     }
@@ -2202,8 +2285,7 @@ public class HiveMetaStore extends Thrif
     public List<Index> get_indexes(final String dbName, final String tblName,
         final short maxIndexes) throws NoSuchObjectException, MetaException,
         TException {
-      incrementCounter("get_indexes");
-      logStartTableFunction("get_indexes", dbName, tblName);
+      startTableFunction("get_indexes", dbName, tblName);
 
       List<Index> ret = null;
       try {
@@ -2218,6 +2300,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_indexes");
       }
       return ret;
     }
@@ -2226,8 +2310,7 @@ public class HiveMetaStore extends Thrif
     public List<Partition> get_partitions_by_filter(final String dbName,
         final String tblName, final String filter, final short maxParts)
         throws MetaException, NoSuchObjectException, TException {
-      incrementCounter("get_partitions_by_filter");
-      logStartTableFunction("get_partitions_by_filter", dbName, tblName);
+      startTableFunction("get_partitions_by_filter", dbName, tblName);
 
       List<Partition> ret = null;
       try {
@@ -2244,6 +2327,8 @@ public class HiveMetaStore extends Thrif
       } catch (Exception e) {
         assert(e instanceof RuntimeException);
         throw (RuntimeException)e;
+      } finally {
+        endFunction("get_partitions_by_filter");
       }
       return ret;
     }
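
Alongside the filter work, this file replaces the incrementCounter/
logStartFunction pairs with startFunction/endFunction helpers, so every
Thrift handler opens a Metrics timing scope on entry and closes it in a
finally block; metrics collection stays off unless
hive.metastore.metrics.enabled is set to true. A minimal sketch of the
pattern as a new handler would follow it (get_example and its body are
hypothetical):

    public String get_example(final String name) throws MetaException {
      startFunction("get_example", ": " + name); // counter + Metrics.startScope
      try {
        return name;                             // real work goes here
      } finally {
        endFunction("get_example");              // Metrics.endScope, even on error
      }
    }

For stubs with no body to time, the two calls collapse to
endFunction(startFunction("get_example")), as create_index and getVersion
do above.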

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1061112&r1=1061111&r2=1061112&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Thu Jan 20 03:47:39 2011
@@ -1266,8 +1266,53 @@ public class ObjectStore implements RawS
 
   private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException{
-    throw new RuntimeException("listMPartitionsByFilter is not supported " +
-        "due to a JDO library downgrade");
+    boolean success = false;
+    List<MPartition> mparts = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitionsByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        throw new NoSuchObjectException("Specified database/table does not exist : "
+            + dbName + "." + tableName);
+      }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
+
+      Query query = pm.newQuery(MPartition.class,
+          queryFilterString);
+
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
+
+      LOG.debug("Filter specified is " + filter + "," +
+             " JDOQL filter is " + queryFilterString);
+
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
+
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
+      query.setOrdering("partitionName ascending");
+
+      mparts = (List<MPartition>) query.executeWithMap(params);
+
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
   }
 
   @Override
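
The revived listMPartitionsByFilter follows the usual JDO
query-with-named-parameters pattern: build a JDOQL filter string, declare
the parameters it references, then execute with a map of bindings. A
stripped-down sketch of that pattern, assuming an open PersistenceManager
pm, the metastore model class MPartition, and a hand-written JDOQL filter
standing in for makeQueryFilterString's output:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import javax.jdo.Query;

    // Match partitions of one table; t1/t2 are bound at execution time.
    Query query = pm.newQuery(MPartition.class,
        "table.tableName == t1 && table.database.name == t2");
    query.declareParameters("java.lang.String t1, java.lang.String t2");
    query.setOrdering("partitionName ascending");
    query.setRange(0, 10);                       // row limit, like maxParts
    Map<String, String> params = new HashMap<String, String>();
    params.put("t1", "filtertbl");
    params.put("t2", "filterdb");
    List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);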

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java?rev=1061112&r1=1061111&r2=1061112&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java Thu Jan 20 03:47:39 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.hive.common.FileUtils;
 
 /**
  * The Class representing the filter as a  binary tree. The tree has TreeNode's
@@ -168,6 +169,17 @@ public class ExpressionTree {
       params.put(paramName, value);
       String filter;
 
+      String keyEqual = FileUtils.escapePathName(keyName) + "=";
+      int keyEqualLength = keyEqual.length();
+      String valString;
+      // partitionname ==>  (key=value/)*(key=value)
+      if (partitionColumnIndex == (partitionColumnCount - 1)) {
+        valString = "partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ")";
+      }
+      else {
+        valString = "partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ").substring(0, partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ").indexOf(\"/\"))";
+      }
+
       //Handle "a > 10" and "10 > a" appropriately
       if (isReverseOrder){
         //For LIKE, the value should be on the RHS
@@ -181,19 +193,18 @@ public class ExpressionTree {
               partitionColumnIndex, partitionColumnCount);
         } else {
           filter = paramName +
-          " " + operator.getJdoOp() + " " +
-          " this.values.get(" + partitionColumnIndex + ")";
+          " " + operator.getJdoOp() + " " + valString;
         }
       } else {
         if (operator == Operator.LIKE ) {
           //generate this.values.get(i).matches("abc%")
-          filter = " this.values.get(" + partitionColumnIndex + ")."
+          filter = " " + valString + "."
               + operator.getJdoOp() + "(" + paramName + ") ";
         } else if (operator == Operator.EQUALS) {
           filter = makeFilterForEquals(keyName, value, paramName, params,
               partitionColumnIndex, partitionColumnCount);
         } else {
-          filter = " this.values.get(" + partitionColumnIndex + ") "
+          filter = " " + valString + " "
               + operator.getJdoOp() + " " + paramName;
         }
       }
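
The generated JDOQL now reads each value out of the stored partitionName
instead of this.values, relying on the (key=value/)*(key=value) layout
noted in the comment. A worked example of the same substring logic in
plain Java, for the hypothetical name p1=a/p2=b/p3=c:

    String partitionName = "p1=a/p2=b/p3=c";
    String keyEqual = "p2=";       // FileUtils.escapePathName("p2") + "="
    int keyEqualLength = keyEqual.length();
    // Everything after the first "p2=" gives "b/p3=c".
    String rest = partitionName.substring(
        partitionName.indexOf(keyEqual) + keyEqualLength);
    // p2 is not the last key, so cut at the next "/" to get "b".
    String value = rest.substring(0, rest.indexOf("/"));
    // For the last key (p3=), the first substring alone yields "c".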

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=1061112&r1=1061111&r2=1061112&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Thu Jan 20 03:47:39 2011
@@ -990,8 +990,306 @@ public abstract class TestHiveMetaStore 
    * @throws Exception
    */
   public void testPartitionFilter() throws Exception {
-    // Tests listMPartitionsByFilter() introduced by HIVE-1609. Temporarily
-    // disabled until issues identified by HIVE-1853 are resolved.
+      String dbName = "filterdb";
+      String tblName = "filtertbl";
+
+      List<String> vals = new ArrayList<String>(3);
+      vals.add("p11");
+      vals.add("p21");
+      vals.add("p31");
+      List <String> vals2 = new ArrayList<String>(3);
+      vals2.add("p11");
+      vals2.add("p22");
+      vals2.add("p31");
+      List <String> vals3 = new ArrayList<String>(3);
+      vals3.add("p12");
+      vals3.add("p21");
+      vals3.add("p31");
+      List <String> vals4 = new ArrayList<String>(3);
+      vals4.add("p12");
+      vals4.add("p23");
+      vals4.add("p31");
+      List <String> vals5 = new ArrayList<String>(3);
+      vals5.add("p13");
+      vals5.add("p24");
+      vals5.add("p31");
+      List <String> vals6 = new ArrayList<String>(3);
+      vals6.add("p13");
+      vals6.add("p25");
+      vals6.add("p31");
+
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
+
+      ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
+      partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+      partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
+      partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.setBucketCols(new ArrayList<String>());
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(partCols);
+      client.createTable(tbl);
+
+      tbl = client.getTable(dbName, tblName);
+
+      add_partition(client, tbl, vals, "part1");
+      add_partition(client, tbl, vals2, "part2");
+      add_partition(client, tbl, vals3, "part3");
+      add_partition(client, tbl, vals4, "part4");
+      add_partition(client, tbl, vals5, "part5");
+      add_partition(client, tbl, vals6, "part6");
+
+      checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+      checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+      checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+      checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+      checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+      checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+
+      checkFilter(client, dbName, tblName,
+          "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+      checkFilter(client, dbName, tblName,
+         "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+         "(p1=\"p13\" aNd p2=\"p24\")", 4);
+      //test for and or precedence
+      checkFilter(client, dbName, tblName,
+         "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+      checkFilter(client, dbName, tblName,
+         "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+
+      checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+      checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+      checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4);
+      checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+      checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+
+      //Test for setting the maximum partition count
+      List<Partition> partitions = client.listPartitionsByFilter(dbName,
+          tblName, "p1 >= \"p12\"", (short) 2);
+      assertEquals("User specified row limit for partitions",
+          2, partitions.size());
+
+      //Negative tests
+      Exception me = null;
+      try {
+        client.listPartitionsByFilter(dbName,
+            tblName, "p3 >= \"p12\"", (short) -1);
+      } catch(MetaException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("Filter on int partition key", me.getMessage().contains(
+            "Filtering is supported only on partition keys of type string"));
+
+      me = null;
+      try {
+        client.listPartitionsByFilter(dbName,
+            tblName, "c1 >= \"p12\"", (short) -1);
+      } catch(MetaException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("Filter on invalid key", me.getMessage().contains(
+            "<c1> is not a partitioning key for the table"));
+
+      me = null;
+      try {
+        client.listPartitionsByFilter(dbName,
+            tblName, "c1 >= ", (short) -1);
+      } catch(MetaException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("Invalid filter string", me.getMessage().contains(
+            "Error parsing partition filter"));
+
+      me = null;
+      try {
+        client.listPartitionsByFilter("invDBName",
+            "invTableName", "p1 = \"p11\"", (short) -1);
+      } catch(NoSuchObjectException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("NoSuchObject exception", me.getMessage().contains(
+            "database/table does not exist"));
+
+      client.dropTable(dbName, tblName);
+      client.dropDatabase(dbName);
+  }
+
+  /**
+   * Test filtering on table with single partition
+   * @throws Exception
+   */
+  public void testFilterSinglePartition() throws Exception {
+      String dbName = "filterdb";
+      String tblName = "filtertbl";
+
+      List<String> vals = new ArrayList<String>(1);
+      vals.add("p11");
+      List <String> vals2 = new ArrayList<String>(1);
+      vals2.add("p12");
+      List <String> vals3 = new ArrayList<String>(1);
+      vals3.add("p13");
+
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
+
+      ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
+      partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.setBucketCols(new ArrayList<String>());
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(partCols);
+      client.createTable(tbl);
+
+      tbl = client.getTable(dbName, tblName);
+
+      add_partition(client, tbl, vals, "part1");
+      add_partition(client, tbl, vals2, "part2");
+      add_partition(client, tbl, vals3, "part3");
+
+      checkFilter(client, dbName, tblName, "p1 = \"p12\"", 1);
+      checkFilter(client, dbName, tblName, "p1 < \"p12\"", 1);
+      checkFilter(client, dbName, tblName, "p1 > \"p12\"", 1);
+      checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 3);
+      checkFilter(client, dbName, tblName, "p1 like \"p.*2\"", 1);
+
+      client.dropTable(dbName, tblName);
+      client.dropDatabase(dbName);
+  }
+
+  /**
+   * Test filtering based on the value of the last partition
+   * @throws Exception
+   */
+  public void testFilterLastPartition() throws Exception {
+      String dbName = "filterdb";
+      String tblName = "filtertbl";
+
+      List<String> vals = new ArrayList<String>(2);
+      vals.add("p11");
+      vals.add("p21");
+      List <String> vals2 = new ArrayList<String>(2);
+      vals2.add("p11");
+      vals2.add("p22");
+      List <String> vals3 = new ArrayList<String>(2);
+      vals3.add("p12");
+      vals3.add("p21");
+
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
+
+      ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(2);
+      partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+      partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.setBucketCols(new ArrayList<String>());
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(partCols);
+      client.createTable(tbl);
+
+      tbl = client.getTable(dbName, tblName);
+
+      add_partition(client, tbl, vals, "part1");
+      add_partition(client, tbl, vals2, "part2");
+      add_partition(client, tbl, vals3, "part3");
+
+      checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+      checkFilter(client, dbName, tblName, "p2 < \"p23\"", 3);
+      checkFilter(client, dbName, tblName, "p2 > \"p21\"", 1);
+      checkFilter(client, dbName, tblName, "p2 >= \"p21\"", 3);
+      checkFilter(client, dbName, tblName, "p2 <= \"p21\"", 2);
+      checkFilter(client, dbName, tblName, "p2 <> \"p12\"", 3);
+      checkFilter(client, dbName, tblName, "p2 like \"p2.*\"", 3);
+      checkFilter(client, dbName, tblName, "p2 like \"p.*2\"", 1);
+
+      client.dropTable(dbName, tblName);
+      client.dropDatabase(dbName);
+  }
+
+  private void checkFilter(HiveMetaStoreClient client, String dbName,
+        String tblName, String filter, int expectedCount)
+        throws MetaException, NoSuchObjectException, TException {
+    List<Partition> partitions = client.listPartitionsByFilter(dbName,
+            tblName, filter, (short) -1);
+
+    assertEquals("Partition count expected for filter " + filter,
+            expectedCount, partitions.size());
   }
 
   private void add_partition(HiveMetaStoreClient client, Table table,