Posted to commits@hive.apache.org by jv...@apache.org on 2010/09/21 23:13:03 UTC

svn commit: r999644 [2/2] - in /hadoop/hive/trunk: ./ metastore/ metastore/if/ metastore/src/gen-cpp/ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/src/gen-php/ metastore/src/gen-py/hive_metastore/ metastore/src/java/org/ap...

Modified: hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php (original)
+++ hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php Tue Sep 21 21:13:02 2010
@@ -38,6 +38,7 @@ interface ThriftHiveMetastoreIf extends 
   public function get_partition_names($db_name, $tbl_name, $max_parts);
   public function get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts);
   public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts);
+  public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
   public function alter_partition($db_name, $tbl_name, $new_part);
   public function get_config_value($name, $defaultValue);
   public function partition_name_to_vals($part_name);
@@ -1673,6 +1674,66 @@ class ThriftHiveMetastoreClient extends 
     throw new Exception("get_partition_names_ps failed: unknown result");
   }
 
+  public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts)
+  {
+    $this->send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
+    return $this->recv_get_partitions_by_filter();
+  }
+
+  public function send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts)
+  {
+    $args = new metastore_ThriftHiveMetastore_get_partitions_by_filter_args();
+    $args->db_name = $db_name;
+    $args->tbl_name = $tbl_name;
+    $args->filter = $filter;
+    $args->max_parts = $max_parts;
+    $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_partitions_by_filter', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_partitions_by_filter', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_partitions_by_filter()
+  {
+    $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_partitions_by_filter_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new metastore_ThriftHiveMetastore_get_partitions_by_filter_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new Exception("get_partitions_by_filter failed: unknown result");
+  }
+
   public function alter_partition($db_name, $tbl_name, $new_part)
   {
     $this->send_alter_partition($db_name, $tbl_name, $new_part);
@@ -8536,6 +8597,282 @@ class metastore_ThriftHiveMetastore_get_
 
 }
 
+class metastore_ThriftHiveMetastore_get_partitions_by_filter_args {
+  static $_TSPEC;
+
+  public $db_name = null;
+  public $tbl_name = null;
+  public $filter = null;
+  public $max_parts = -1;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'db_name',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'tbl_name',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'filter',
+          'type' => TType::STRING,
+          ),
+        4 => array(
+          'var' => 'max_parts',
+          'type' => TType::I16,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['db_name'])) {
+        $this->db_name = $vals['db_name'];
+      }
+      if (isset($vals['tbl_name'])) {
+        $this->tbl_name = $vals['tbl_name'];
+      }
+      if (isset($vals['filter'])) {
+        $this->filter = $vals['filter'];
+      }
+      if (isset($vals['max_parts'])) {
+        $this->max_parts = $vals['max_parts'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_partitions_by_filter_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->db_name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tbl_name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->filter);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::I16) {
+            $xfer += $input->readI16($this->max_parts);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_by_filter_args');
+    if ($this->db_name !== null) {
+      $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
+      $xfer += $output->writeString($this->db_name);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tbl_name !== null) {
+      $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
+      $xfer += $output->writeString($this->tbl_name);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->filter !== null) {
+      $xfer += $output->writeFieldBegin('filter', TType::STRING, 3);
+      $xfer += $output->writeString($this->filter);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->max_parts !== null) {
+      $xfer += $output->writeFieldBegin('max_parts', TType::I16, 4);
+      $xfer += $output->writeI16($this->max_parts);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class metastore_ThriftHiveMetastore_get_partitions_by_filter_result {
+  static $_TSPEC;
+
+  public $success = null;
+  public $o1 = null;
+  public $o2 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => 'metastore_Partition',
+            ),
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => 'metastore_MetaException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => 'metastore_NoSuchObjectException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_partitions_by_filter_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size217 = 0;
+            $_etype220 = 0;
+            $xfer += $input->readListBegin($_etype220, $_size217);
+            for ($_i221 = 0; $_i221 < $_size217; ++$_i221)
+            {
+              $elem222 = null;
+              $elem222 = new metastore_Partition();
+              $xfer += $elem222->read($input);
+              $this->success []= $elem222;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new metastore_MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new metastore_NoSuchObjectException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_by_filter_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->success));
+        {
+          foreach ($this->success as $iter223)
+          {
+            $xfer += $iter223->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class metastore_ThriftHiveMetastore_alter_partition_args {
   static $_TSPEC;
 
@@ -9063,14 +9400,14 @@ class metastore_ThriftHiveMetastore_part
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size217 = 0;
-            $_etype220 = 0;
-            $xfer += $input->readListBegin($_etype220, $_size217);
-            for ($_i221 = 0; $_i221 < $_size217; ++$_i221)
+            $_size224 = 0;
+            $_etype227 = 0;
+            $xfer += $input->readListBegin($_etype227, $_size224);
+            for ($_i228 = 0; $_i228 < $_size224; ++$_i228)
             {
-              $elem222 = null;
-              $xfer += $input->readString($elem222);
-              $this->success []= $elem222;
+              $elem229 = null;
+              $xfer += $input->readString($elem229);
+              $this->success []= $elem229;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -9106,9 +9443,9 @@ class metastore_ThriftHiveMetastore_part
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter223)
+          foreach ($this->success as $iter230)
           {
-            $xfer += $output->writeString($iter223);
+            $xfer += $output->writeString($iter230);
           }
         }
         $output->writeListEnd();
@@ -9259,17 +9596,17 @@ class metastore_ThriftHiveMetastore_part
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size224 = 0;
-            $_ktype225 = 0;
-            $_vtype226 = 0;
-            $xfer += $input->readMapBegin($_ktype225, $_vtype226, $_size224);
-            for ($_i228 = 0; $_i228 < $_size224; ++$_i228)
+            $_size231 = 0;
+            $_ktype232 = 0;
+            $_vtype233 = 0;
+            $xfer += $input->readMapBegin($_ktype232, $_vtype233, $_size231);
+            for ($_i235 = 0; $_i235 < $_size231; ++$_i235)
             {
-              $key229 = '';
-              $val230 = '';
-              $xfer += $input->readString($key229);
-              $xfer += $input->readString($val230);
-              $this->success[$key229] = $val230;
+              $key236 = '';
+              $val237 = '';
+              $xfer += $input->readString($key236);
+              $xfer += $input->readString($val237);
+              $this->success[$key236] = $val237;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -9305,10 +9642,10 @@ class metastore_ThriftHiveMetastore_part
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
         {
-          foreach ($this->success as $kiter231 => $viter232)
+          foreach ($this->success as $kiter238 => $viter239)
           {
-            $xfer += $output->writeString($kiter231);
-            $xfer += $output->writeString($viter232);
+            $xfer += $output->writeString($kiter238);
+            $xfer += $output->writeString($viter239);
           }
         }
         $output->writeMapEnd();
@@ -10231,15 +10568,15 @@ class metastore_ThriftHiveMetastore_get_
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size233 = 0;
-            $_etype236 = 0;
-            $xfer += $input->readListBegin($_etype236, $_size233);
-            for ($_i237 = 0; $_i237 < $_size233; ++$_i237)
+            $_size240 = 0;
+            $_etype243 = 0;
+            $xfer += $input->readListBegin($_etype243, $_size240);
+            for ($_i244 = 0; $_i244 < $_size240; ++$_i244)
             {
-              $elem238 = null;
-              $elem238 = new metastore_Index();
-              $xfer += $elem238->read($input);
-              $this->success []= $elem238;
+              $elem245 = null;
+              $elem245 = new metastore_Index();
+              $xfer += $elem245->read($input);
+              $this->success []= $elem245;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -10283,9 +10620,9 @@ class metastore_ThriftHiveMetastore_get_
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter239)
+          foreach ($this->success as $iter246)
           {
-            $xfer += $iter239->write($output);
+            $xfer += $iter246->write($output);
           }
         }
         $output->writeListEnd();
@@ -10477,14 +10814,14 @@ class metastore_ThriftHiveMetastore_get_
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size240 = 0;
-            $_etype243 = 0;
-            $xfer += $input->readListBegin($_etype243, $_size240);
-            for ($_i244 = 0; $_i244 < $_size240; ++$_i244)
+            $_size247 = 0;
+            $_etype250 = 0;
+            $xfer += $input->readListBegin($_etype250, $_size247);
+            for ($_i251 = 0; $_i251 < $_size247; ++$_i251)
             {
-              $elem245 = null;
-              $xfer += $input->readString($elem245);
-              $this->success []= $elem245;
+              $elem252 = null;
+              $xfer += $input->readString($elem252);
+              $this->success []= $elem252;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -10520,9 +10857,9 @@ class metastore_ThriftHiveMetastore_get_
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter246)
+          foreach ($this->success as $iter253)
           {
-            $xfer += $output->writeString($iter246);
+            $xfer += $output->writeString($iter253);
           }
         }
         $output->writeListEnd();

Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote Tue Sep 21 21:13:02 2010
@@ -49,6 +49,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == 
   print '   get_partition_names(string db_name, string tbl_name, i16 max_parts)'
   print '   get_partitions_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)'
   print '   get_partition_names_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)'
+  print '   get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
   print '  void alter_partition(string db_name, string tbl_name, Partition new_part)'
   print '  string get_config_value(string name, string defaultValue)'
   print '   partition_name_to_vals(string part_name)'
@@ -274,6 +275,12 @@ elif cmd == 'get_partition_names_ps':
     sys.exit(1)
   pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
 
+elif cmd == 'get_partitions_by_filter':
+  if len(args) != 4:
+    print 'get_partitions_by_filter requires 4 args'
+    sys.exit(1)
+  pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),))
+
 elif cmd == 'alter_partition':
   if len(args) != 3:
     print 'alter_partition requires 3 args'

Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py Tue Sep 21 21:13:02 2010
@@ -245,6 +245,16 @@ class Iface(fb303.FacebookService.Iface)
     """
     pass
 
+  def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+    """
+    Parameters:
+     - db_name
+     - tbl_name
+     - filter
+     - max_parts
+    """
+    pass
+
   def alter_partition(self, db_name, tbl_name, new_part):
     """
     Parameters:
@@ -1336,6 +1346,46 @@ class Client(fb303.FacebookService.Clien
       raise result.o1
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result");
 
+  def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+    """
+    Parameters:
+     - db_name
+     - tbl_name
+     - filter
+     - max_parts
+    """
+    self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts)
+    return self.recv_get_partitions_by_filter()
+
+  def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+    self._oprot.writeMessageBegin('get_partitions_by_filter', TMessageType.CALL, self._seqid)
+    args = get_partitions_by_filter_args()
+    args.db_name = db_name
+    args.tbl_name = tbl_name
+    args.filter = filter
+    args.max_parts = max_parts
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_partitions_by_filter(self, ):
+    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(self._iprot)
+      self._iprot.readMessageEnd()
+      raise x
+    result = get_partitions_by_filter_result()
+    result.read(self._iprot)
+    self._iprot.readMessageEnd()
+    if result.success != None:
+      return result.success
+    if result.o1 != None:
+      raise result.o1
+    if result.o2 != None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+
   def alter_partition(self, db_name, tbl_name, new_part):
     """
     Parameters:
@@ -1692,6 +1742,7 @@ class Processor(fb303.FacebookService.Pr
     self._processMap["get_partition_names"] = Processor.process_get_partition_names
     self._processMap["get_partitions_ps"] = Processor.process_get_partitions_ps
     self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps
+    self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter
     self._processMap["alter_partition"] = Processor.process_alter_partition
     self._processMap["get_config_value"] = Processor.process_get_config_value
     self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals
@@ -2169,6 +2220,22 @@ class Processor(fb303.FacebookService.Pr
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_get_partitions_by_filter(self, seqid, iprot, oprot):
+    args = get_partitions_by_filter_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_partitions_by_filter_result()
+    try:
+      result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts)
+    except MetaException, o1:
+      result.o1 = o1
+    except NoSuchObjectException, o2:
+      result.o2 = o2
+    oprot.writeMessageBegin("get_partitions_by_filter", TMessageType.REPLY, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_alter_partition(self, seqid, iprot, oprot):
     args = alter_partition_args()
     args.read(iprot)
@@ -6643,6 +6710,188 @@ class get_partition_names_ps_result:
   def __ne__(self, other):
     return not (self == other)
 
+class get_partitions_by_filter_args:
+  """
+  Attributes:
+   - db_name
+   - tbl_name
+   - filter
+   - max_parts
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_name', None, None, ), # 1
+    (2, TType.STRING, 'tbl_name', None, None, ), # 2
+    (3, TType.STRING, 'filter', None, None, ), # 3
+    (4, TType.I16, 'max_parts', None, -1, ), # 4
+  )
+
+  def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4],):
+    self.db_name = db_name
+    self.tbl_name = tbl_name
+    self.filter = filter
+    self.max_parts = max_parts
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tbl_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.filter = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I16:
+          self.max_parts = iprot.readI16();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partitions_by_filter_args')
+    if self.db_name != None:
+      oprot.writeFieldBegin('db_name', TType.STRING, 1)
+      oprot.writeString(self.db_name)
+      oprot.writeFieldEnd()
+    if self.tbl_name != None:
+      oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+      oprot.writeString(self.tbl_name)
+      oprot.writeFieldEnd()
+    if self.filter != None:
+      oprot.writeFieldBegin('filter', TType.STRING, 3)
+      oprot.writeString(self.filter)
+      oprot.writeFieldEnd()
+    if self.max_parts != None:
+      oprot.writeFieldBegin('max_parts', TType.I16, 4)
+      oprot.writeI16(self.max_parts)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_partitions_by_filter_result:
+  """
+  Attributes:
+   - success
+   - o1
+   - o2
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, success=None, o1=None, o2=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype220, _size217) = iprot.readListBegin()
+          for _i221 in xrange(_size217):
+            _elem222 = Partition()
+            _elem222.read(iprot)
+            self.success.append(_elem222)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = NoSuchObjectException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partitions_by_filter_result')
+    if self.success != None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter223 in self.success:
+        iter223.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 != None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 != None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class alter_partition_args:
   """
   Attributes:
@@ -7014,10 +7263,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype220, _size217) = iprot.readListBegin()
-          for _i221 in xrange(_size217):
-            _elem222 = iprot.readString();
-            self.success.append(_elem222)
+          (_etype227, _size224) = iprot.readListBegin()
+          for _i228 in xrange(_size224):
+            _elem229 = iprot.readString();
+            self.success.append(_elem229)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7040,8 +7289,8 @@ class partition_name_to_vals_result:
     if self.success != None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter223 in self.success:
-        oprot.writeString(iter223)
+      for iter230 in self.success:
+        oprot.writeString(iter230)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 != None:
@@ -7146,11 +7395,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype225, _vtype226, _size224 ) = iprot.readMapBegin() 
-          for _i228 in xrange(_size224):
-            _key229 = iprot.readString();
-            _val230 = iprot.readString();
-            self.success[_key229] = _val230
+          (_ktype232, _vtype233, _size231 ) = iprot.readMapBegin() 
+          for _i235 in xrange(_size231):
+            _key236 = iprot.readString();
+            _val237 = iprot.readString();
+            self.success[_key236] = _val237
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -7173,9 +7422,9 @@ class partition_name_to_spec_result:
     if self.success != None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter231,viter232 in self.success.items():
-        oprot.writeString(kiter231)
-        oprot.writeString(viter232)
+      for kiter238,viter239 in self.success.items():
+        oprot.writeString(kiter238)
+        oprot.writeString(viter239)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 != None:
@@ -7807,11 +8056,11 @@ class get_indexes_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype236, _size233) = iprot.readListBegin()
-          for _i237 in xrange(_size233):
-            _elem238 = Index()
-            _elem238.read(iprot)
-            self.success.append(_elem238)
+          (_etype243, _size240) = iprot.readListBegin()
+          for _i244 in xrange(_size240):
+            _elem245 = Index()
+            _elem245.read(iprot)
+            self.success.append(_elem245)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7840,8 +8089,8 @@ class get_indexes_result:
     if self.success != None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter239 in self.success:
-        iter239.write(oprot)
+      for iter246 in self.success:
+        iter246.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 != None:
@@ -7974,10 +8223,10 @@ class get_index_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype243, _size240) = iprot.readListBegin()
-          for _i244 in xrange(_size240):
-            _elem245 = iprot.readString();
-            self.success.append(_elem245)
+          (_etype250, _size247) = iprot.readListBegin()
+          for _i251 in xrange(_size247):
+            _elem252 = iprot.readString();
+            self.success.append(_elem252)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -8000,8 +8249,8 @@ class get_index_names_result:
     if self.success != None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter246 in self.success:
-        oprot.writeString(iter246)
+      for iter253 in self.success:
+        oprot.writeString(iter253)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 != None:

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Tue Sep 21 21:13:02 2010
@@ -2074,6 +2074,32 @@ public class HiveMetaStore extends Thrif
       return ret;
     }
 
+    @Override
+    public List<Partition> get_partitions_by_filter(final String dbName,
+        final String tblName, final String filter, final short maxParts)
+        throws MetaException, NoSuchObjectException, TException {
+      incrementCounter("get_partitions_by_filter");
+      logStartTableFunction("get_partitions_by_filter", dbName, tblName);
+
+      List<Partition> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (NoSuchObjectException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+      return ret;
+    }
+
   }
 
   /**

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Tue Sep 21 21:13:02 2010
@@ -517,6 +517,27 @@ public class HiveMetaStoreClient impleme
   }
 
   /**
+   * Get list of partitions matching specified filter
+   * @param db_name the database name
+   * @param tbl_name the table name
+   * @param filter the filter string,
+   *    for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can
+   *    be done only on string partition keys.
+   * @param max_parts the maximum number of partitions to return,
+   *    all partitions are returned if -1 is passed
+   * @return list of partitions
+   * @throws MetaException
+   * @throws NoSuchObjectException
+   * @throws TException
+   */
+  public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+      String filter, short max_parts) throws MetaException,
+         NoSuchObjectException, TException {
+    return deepCopyPartitions(
+        client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts));
+  }
+
+  /**
    * @param name
    * @return the database
    * @throws NoSuchObjectException

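For readers of this commit, a minimal caller-side sketch of the new client API above may be useful. It is not part of the change itself: the database, table, and filter values are invented, and it assumes a metastore reachable through the usual HiveConf settings.

    // Hypothetical usage of HiveMetaStoreClient.listPartitionsByFilter()
    // (illustration only; the names and filter are made up).
    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class ListPartitionsByFilterSketch {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // Filters may reference string partition keys only; max_parts = -1 returns all matches.
        List<Partition> parts = client.listPartitionsByFilter(
            "filterdb", "filtertbl", "p1 = \"p11\" and p2 <> \"p22\"", (short) -1);
        for (Partition p : parts) {
          System.out.println(p.getValues());
        }
        client.close();
      }
    }
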
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Tue Sep 21 21:13:02 2010
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import java.io.ByteArrayInputStream;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -36,6 +38,10 @@ import javax.jdo.Query;
 import javax.jdo.Transaction;
 import javax.jdo.datastore.DataStoreCache;
 
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.RecognitionException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
@@ -62,6 +68,10 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 import org.apache.hadoop.hive.metastore.model.MTable;
 import org.apache.hadoop.hive.metastore.model.MType;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
+import org.apache.hadoop.hive.metastore.parser.FilterLexer;
+import org.apache.hadoop.hive.metastore.parser.FilterParser;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -944,6 +954,102 @@ public class ObjectStore implements RawS
     return mparts;
   }
 
+  @Override
+  public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+      String filter, short maxParts) throws MetaException, NoSuchObjectException {
+    openTransaction();
+    List<Partition> parts = convertToParts(listMPartitionsByFilter(dbName,
+        tblName, filter, maxParts));
+    commitTransaction();
+    return parts;
+  }
+
+  private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
+      String filter, short maxParts) throws MetaException, NoSuchObjectException{
+    boolean success = false;
+    List<MPartition> mparts = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitionsByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        throw new NoSuchObjectException("Specified database/table does not exist : " 
+            + dbName + "." + tableName);
+      }
+
+      StringBuilder queryBuilder = new StringBuilder(
+          "table.tableName == t1 && table.database.name == t2");
+
+      Map<String, String> params = new HashMap<String, String>();
+
+      if( filter != null ) {
+
+        Table table = convertToTable(mtable);
+
+        CharStream cs = new ANTLRNoCaseStringStream(filter);
+        FilterLexer lexer = new FilterLexer(cs);
+
+        CommonTokenStream tokens = new CommonTokenStream();
+        tokens.setTokenSource (lexer);
+
+        FilterParser parser = new FilterParser(tokens);
+
+        try {
+          parser.filter();
+        } catch(RecognitionException re) {
+          throw new MetaException("Error parsing partition filter : " + re);
+        }
+
+        String jdoFilter = parser.tree.generateJDOFilter(table, params);
+
+        if( jdoFilter.trim().length() > 0 ) {
+          queryBuilder.append(" && ( ");
+          queryBuilder.append(jdoFilter.trim());
+          queryBuilder.append(" )");
+        }
+      }
+
+      Query query = pm.newQuery(MPartition.class,
+          queryBuilder.toString());
+
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
+
+      //Create the parameter declaration string
+      StringBuilder paramDecl = new StringBuilder(
+          "java.lang.String t1, java.lang.String t2");
+      for(String key : params.keySet() ) {
+        paramDecl.append(", java.lang.String  " + key);
+      }
+
+      LOG.debug("Filter specified is " + filter + "," +
+             " JDOQL filter is " + queryBuilder.toString());
+
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
+
+      query.declareParameters(paramDecl.toString());
+      query.setOrdering("partitionName ascending");
+
+      mparts = (List<MPartition>) query.executeWithMap(params);
+
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
+  }
+
   public void alterTable(String dbname, String name, Table newTable)
       throws InvalidObjectException, MetaException {
     boolean success = false;

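To make the new ObjectStore path concrete, here is a hedged, standalone sketch of the parse-and-translate step that listMPartitionsByFilter() performs. The table layout and filter string are invented; the sketch only prints the JDOQL fragment and parameter map that ObjectStore would append to its base "table.tableName == t1 && table.database.name == t2" query.

    // Illustrative sketch of the filter -> JDOQL translation (not part of the
    // commit; the table and filter below are made up).
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.antlr.runtime.CharStream;
    import org.antlr.runtime.CommonTokenStream;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
    import org.apache.hadoop.hive.metastore.parser.FilterLexer;
    import org.apache.hadoop.hive.metastore.parser.FilterParser;
    import org.apache.hadoop.hive.serde.Constants;

    public class FilterToJdoqlSketch {
      public static void main(String[] args) throws Exception {
        // A table partitioned on two string keys, standing in for convertToTable(mtable).
        Table table = new Table();
        List<FieldSchema> partKeys = new ArrayList<FieldSchema>();
        partKeys.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
        partKeys.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
        table.setPartitionKeys(partKeys);

        // Parse the filter exactly as listMPartitionsByFilter() does.
        String filter = "p1 = \"p11\" and p2 <> \"p22\"";
        CharStream cs = new ANTLRNoCaseStringStream(filter);
        CommonTokenStream tokens = new CommonTokenStream();
        tokens.setTokenSource(new FilterLexer(cs));
        FilterParser parser = new FilterParser(tokens);
        parser.filter();

        Map<String, String> params = new HashMap<String, String>();
        String jdoFilter = parser.tree.generateJDOFilter(table, params);

        // Expected to resemble:
        //   ( this.values.get(0) == hive_filter_param_0 && this.values.get(1) != hive_filter_param_1 )
        // with params {hive_filter_param_0=p11, hive_filter_param_1=p22};
        // ObjectStore then adds t1/t2 and declares each key as a java.lang.String parameter.
        System.out.println(jdoFilter);
        System.out.println(params);
      }
    }
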
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Tue Sep 21 21:13:02 2010
@@ -122,4 +122,8 @@ public interface RawStore extends Config
   public abstract List<String> listIndexNames(String dbName,
       String origTableName, short max) throws MetaException;
 
+  public abstract List<Partition> getPartitionsByFilter(
+      String dbName, String tblName, String filter, short maxParts)
+      throws MetaException, NoSuchObjectException;
+
 }

Added: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java?rev=999644&view=auto
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (added)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java Tue Sep 21 21:13:02 2010
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.parser;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Stack;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde.Constants;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+
+/**
+ * The Class representing the filter as a binary tree. The tree has TreeNodes
+ * at the intermediate levels and the leaf-level nodes are of type LeafNode.
+ */
+public class ExpressionTree {
+
+  /** The logical operations supported. */
+  public enum LogicalOperator {
+    AND,
+    OR
+  }
+
+  /** The operators supported. */
+  public enum Operator {
+    EQUALS  ("=", "=="),
+    GREATERTHAN  (">"),
+    LESSTHAN  ("<"),
+    LESSTHANOREQUALTO ("<="),
+    GREATERTHANOREQUALTO (">="),
+    LIKE ("LIKE", "matches"),
+    NOTEQUALS ("<>", "!=");
+
+    private final String op;
+    private final String jdoOp;
+
+    // private constructor
+    private Operator(String op){
+      this.op = op;
+      this.jdoOp = op;
+    }
+
+    private Operator(String op, String jdoOp){
+      this.op = op;
+      this.jdoOp = jdoOp;
+    }
+
+    public String getOp() {
+      return op;
+    }
+
+    public String getJdoOp() {
+      return jdoOp;
+    }
+
+    public static Operator fromString(String inputOperator) {
+      for(Operator op : Operator.values()) {
+        if(op.getOp().equals(inputOperator)){
+          return op;
+        }
+      }
+
+      throw new Error("Invalid value " + inputOperator +
+          " for " + Operator.class.getSimpleName());
+    }
+  }
+
+
+  /**
+   * The Class representing a Node in the ExpressionTree.
+   */
+  public static class TreeNode {
+    private TreeNode lhs;
+    private LogicalOperator andOr;
+    private TreeNode rhs;
+
+    public TreeNode() {
+    } 
+
+    public TreeNode(TreeNode lhs, LogicalOperator andOr, TreeNode rhs) {
+      this.lhs = lhs;
+      this.andOr = andOr;
+      this.rhs = rhs;
+    }
+
+    public String generateJDOFilter(Table table, Map<String, String> params)
+    throws MetaException {
+      StringBuilder filterBuffer = new StringBuilder();
+
+      if ( lhs != null) {
+        filterBuffer.append (" (");
+        filterBuffer.append(lhs.generateJDOFilter(table, params));
+
+        if (rhs != null) {
+          if( andOr == LogicalOperator.AND ) {
+            filterBuffer.append(" && ");
+          } else {
+            filterBuffer.append(" || ");
+          }
+
+          filterBuffer.append(rhs.generateJDOFilter(table, params));
+        }
+        filterBuffer.append (") ");
+      }
+
+      return filterBuffer.toString();
+    }
+
+  }
+
+  /**
+   * The Class representing the leaf level nodes in the ExpressionTree.
+   */
+  public static class LeafNode extends TreeNode {
+    public String keyName;
+    public Operator operator;
+    public String value;
+    public boolean isReverseOrder = false;
+    private static final String PARAM_PREFIX = "hive_filter_param_";
+
+    @Override
+    public String generateJDOFilter(Table table, Map<String, String> params)
+    throws MetaException {
+      int partitionIndex;
+      for(partitionIndex = 0;
+      partitionIndex < table.getPartitionKeys().size();
+      partitionIndex++ ) {
+        if( table.getPartitionKeys().get(partitionIndex).getName().
+            equalsIgnoreCase(keyName)) {
+          break;
+        }
+      }
+
+      if( partitionIndex == table.getPartitionKeys().size() ) {
+        throw new MetaException("Specified key <" + keyName +
+            "> is not a partitioning key for the table");
+      }
+
+      if( ! table.getPartitionKeys().get(partitionIndex).
+          getType().equals(Constants.STRING_TYPE_NAME) ) {
+        throw new MetaException
+        ("Filtering is supported only on partition keys of type string");
+      }
+
+      String paramName = PARAM_PREFIX + params.size();
+      params.put(paramName, value);
+      String filter;
+
+      //Handle "a > 10" and "10 > a" appropriately
+      if (isReverseOrder){
+        //For LIKE, the value should be on the RHS
+        if( operator == Operator.LIKE ) {
+          throw new MetaException(
+              "Value should be on the RHS for LIKE operator : " +
+              "Key <" + keyName + ">");
+        }
+
+        filter = paramName +
+          " " + operator.getJdoOp() + " " +
+          " this.values.get(" + partitionIndex + ")";
+      } else {
+        if( operator == Operator.LIKE ) {
+          //generate this.values.get(i).matches("abc%")
+          filter = " this.values.get(" + partitionIndex + ")."
+              + operator.getJdoOp() + "(" + paramName + ") ";
+        } else {
+          filter = " this.values.get(" + partitionIndex + ") "
+              + operator.getJdoOp() + " " + paramName;
+        }
+      }
+      return filter;
+    }
+  }
+
+  /**
+   * The root node for the tree.
+   */
+  private TreeNode root = null;
+
+  /**
+   * The node stack used to keep track of the tree nodes during parsing.
+   */
+  private final Stack<TreeNode> nodeStack = new Stack<TreeNode>();
+
+  /**
+   * Adds an intermediate node of either type (AND/OR). Pops the last two nodes
+   * from the stack, sets them as children of the new node, and pushes the new
+   * node onto the stack.
+   * @param andOr the operator type
+   */
+  public void addIntermediateNode(LogicalOperator andOr) {
+
+    TreeNode rhs = nodeStack.pop();
+    TreeNode lhs = nodeStack.pop();
+    TreeNode newNode = new TreeNode(lhs, andOr, rhs);
+    nodeStack.push(newNode);
+    root = newNode;
+  }
+
+  /**
+   * Adds a leaf node, pushes the new node onto the stack.
+   * @param newNode the new node
+   */
+  public void addLeafNode(LeafNode newNode) {
+    if( root == null ) {
+      root = newNode;
+    }
+    nodeStack.push(newNode);
+  }
+
+  /** Generate the JDOQL filter for the given expression tree
+   * @param table the table being queried
+   * @param params the input map which is updated with the
+   *     parameterized values. Keys are the parameter names and values
+   *     are the parameter values
+   * @return the string representation of the expression tree
+   * @throws MetaException
+   */
+  public String generateJDOFilter(Table table,
+        Map<String, String> params) throws MetaException {
+    if( root == null ) {
+      return "";
+    }
+
+    return root.generateJDOFilter(table, params);
+  }
+
+  /** Case insensitive ANTLR string stream */
+  public static class ANTLRNoCaseStringStream extends ANTLRStringStream {
+    public ANTLRNoCaseStringStream (String input) {
+      super(input);
+    }
+
+    public int LA (int i) {
+      int returnChar = super.LA (i);
+
+      if (returnChar == CharStream.EOF) {
+        return returnChar;
+      }
+      else if (returnChar == 0) {
+        return returnChar;
+      }
+
+      return Character.toUpperCase ((char) returnChar);
+    }
+  }
+}

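As a smaller, hand-built illustration of the LeafNode translation above (again a sketch, with an invented table and partition key), the following shows how a LIKE comparison becomes a JDOQL matches() call, with the comparison value passed through as the match pattern.

    // Sketch: a single LeafNode and the JDOQL it emits (illustration only).
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
    import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
    import org.apache.hadoop.hive.serde.Constants;

    public class LeafNodeSketch {
      public static void main(String[] args) throws Exception {
        Table table = new Table();
        List<FieldSchema> partKeys = new ArrayList<FieldSchema>();
        partKeys.add(new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
        table.setPartitionKeys(partKeys);

        LeafNode node = new LeafNode();
        node.keyName = "ds";
        node.operator = Operator.LIKE;
        node.value = "2010-09.*";   // handed to matches() unchanged

        // Expected to print roughly:  this.values.get(0).matches(hive_filter_param_0)
        System.out.println(node.generateJDOFilter(table, new HashMap<String, String>()));
      }
    }
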
Added: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g?rev=999644&view=auto
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g (added)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g Tue Sep 21 21:13:02 2010
@@ -0,0 +1,130 @@
+grammar Filter;
+
+options
+{
+  k=3;
+}
+
+
+// Package headers
+@header {
+package org.apache.hadoop.hive.metastore.parser;
+
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
+}
+@lexer::header {package org.apache.hadoop.hive.metastore.parser;}
+
+@members {
+  public ExpressionTree tree = new ExpressionTree();
+
+  public static String TrimQuotes (String input) {
+    if (input.length () > 1) {
+      if ((input.charAt (0) == '"' && input.charAt (input.length () - 1) == '"')
+        || (input.charAt (0) == '\'' && input.charAt (input.length () - 1) == '\'')) {
+        return input.substring (1, input.length () - 1);
+      }
+    }
+    return input;
+  }
+}
+
+@rulecatch{
+  catch (RecognitionException e){
+    throw e;
+  }
+}
+
+//main rule
+filter
+    :
+    orExpression 
+    ;
+
+orExpression
+    :
+    andExpression (KW_OR andExpression { tree.addIntermediateNode(LogicalOperator.OR); } )*
+    ;
+
+andExpression
+    :
+    expression (KW_AND expression  { tree.addIntermediateNode(LogicalOperator.AND); } )*
+    ;
+
+expression
+    :
+    LPAREN orExpression RPAREN
+    |
+    operatorExpression
+    ;
+
+operatorExpression 
+@init { 
+    boolean isReverseOrder = false;
+}
+    :
+    (
+       (key = Identifier op = operator  value = StringLiteral)
+       | 
+       (value = StringLiteral  op = operator key = Identifier) { isReverseOrder = true; }
+    )
+    {
+        LeafNode node = new LeafNode();
+        node.keyName = key.getText();
+        node.value = TrimQuotes(value.getText());
+        node.operator = op;
+        node.isReverseOrder = isReverseOrder;
+
+        tree.addLeafNode(node);
+    };
+
+operator returns [Operator op]
+   :
+   t = (LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO | KW_LIKE | EQUAL | NOTEQUAL)
+   {
+      $op = Operator.fromString(t.getText().toUpperCase());
+   };
+
+// Keywords
+KW_AND : 'AND';
+KW_OR : 'OR';
+KW_LIKE : 'LIKE';
+
+// Operators
+LPAREN : '(' ;
+RPAREN : ')' ;
+EQUAL : '=';
+NOTEQUAL : '<>';
+LESSTHANOREQUALTO : '<=';
+LESSTHAN : '<';
+GREATERTHANOREQUALTO : '>=';
+GREATERTHAN : '>';
+
+// LITERALS
+fragment
+Letter
+    : 'a'..'z' | 'A'..'Z'
+    ;
+
+fragment
+Digit
+    :
+    '0'..'9'
+    ;
+
+StringLiteral
+    :
+    ( '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
+    | '\"' ( ~('\"'|'\\') | ('\\' .) )* '\"'
+    )
+    ;
+
+Identifier
+    :
+    (Letter | Digit) (Letter | Digit | '_')*
+    ;
+
+WS  :   (' '|'\r'|'\t'|'\n')+ { skip(); } ;
+

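Since the grammar above defines which filter strings the metastore accepts, a few assumed-typical examples follow (not taken from the commit). Keywords are case-insensitive because the parser is fed through ANTLRNoCaseStringStream, string literals may use single or double quotes, reversed operand order ("value op key") is parsed but later rejected for LIKE, and a LIKE pattern is handed to JDOQL matches().

    part1 = "p1_abc"
    "p1_abc" = part1
    part1 <> 'x' and (part2 = "y" or part2 = "z")
    part1 LIKE "p1_.*"
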
Added: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java?rev=999644&view=auto
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java (added)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java Tue Sep 21 21:13:02 2010
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package implements the parser for parsing the string filter
+ * for the listPartitionsByFilter API.
+ */
+package org.apache.hadoop.hive.metastore.parser;

Modified: hadoop/hive/trunk/metastore/src/model/package.jdo
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/model/package.jdo?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/model/package.jdo (original)
+++ hadoop/hive/trunk/metastore/src/model/package.jdo Tue Sep 21 21:13:02 2010
@@ -128,10 +128,10 @@
            <column name="PARAM_VALUE" length="767" jdbc-type="VARCHAR"/>
         </value>
       </field>
-      <field name="viewOriginalText">
+      <field name="viewOriginalText" default-fetch-group="false">
         <column name="VIEW_ORIGINAL_TEXT" jdbc-type="LONGVARCHAR"/>
       </field>
-      <field name="viewExpandedText">
+      <field name="viewExpandedText" default-fetch-group="false">
         <column name="VIEW_EXPANDED_TEXT" jdbc-type="LONGVARCHAR"/>
       </field>
       <field name="tableType">

Modified: hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Tue Sep 21 21:13:02 2010
@@ -31,6 +31,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -968,4 +969,186 @@ public abstract class TestHiveMetaStore 
     } catch (InvalidOperationException e) {
     }
   }
+
+  /**
+   * Tests the listPartitionsByFilter functionality.
+   * @throws Exception
+   */
+  public void testPartitionFilter() throws Exception {
+      String dbName = "filterdb";
+      String tblName = "filtertbl";
+
+      List<String> vals = new ArrayList<String>(3);
+      vals.add("p11");
+      vals.add("p21");
+      vals.add("p31");
+      List <String> vals2 = new ArrayList<String>(3);
+      vals2.add("p11");
+      vals2.add("p22");
+      vals2.add("p31");
+      List <String> vals3 = new ArrayList<String>(3);
+      vals3.add("p12");
+      vals3.add("p21");
+      vals3.add("p31");
+      List <String> vals4 = new ArrayList<String>(3);
+      vals4.add("p12");
+      vals4.add("p23");
+      vals4.add("p31");
+      List <String> vals5 = new ArrayList<String>(3);
+      vals5.add("p13");
+      vals5.add("p24");
+      vals5.add("p31");
+      List <String> vals6 = new ArrayList<String>(3);
+      vals6.add("p13");
+      vals6.add("p25");
+      vals6.add("p31");
+
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
+
+      ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
+      partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+      partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
+      partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.setBucketCols(new ArrayList<String>());
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(partCols);
+      client.createTable(tbl);
+
+      tbl = client.getTable(dbName, tblName);
+
+      add_partition(client, tbl, vals, "part1");
+      add_partition(client, tbl, vals2, "part2");
+      add_partition(client, tbl, vals3, "part3");
+      add_partition(client, tbl, vals4, "part4");
+      add_partition(client, tbl, vals5, "part5");
+      add_partition(client, tbl, vals6, "part6");
+
+      checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+      checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+      checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+      checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+      checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+      checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+
+      checkFilter(client, dbName, tblName,
+          "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+      checkFilter(client, dbName, tblName,
+         "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+         "(p1=\"p13\" aNd p2=\"p24\")", 4);
+      // Test AND/OR precedence: AND binds tighter than OR.
+      checkFilter(client, dbName, tblName,
+         "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1); 
+      checkFilter(client, dbName, tblName,
+         "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2); 
+
+      checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+      checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+      checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4);
+      checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+      checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+
+      //Test for setting the maximum partition count
+      List<Partition> partitions = client.listPartitionsByFilter(dbName,
+          tblName, "p1 >= \"p12\"", (short) 2);
+      assertEquals("User specified row limit for partitions",
+          2, partitions.size());
+
+      //Negative tests
+      Exception me = null;
+      try {
+        client.listPartitionsByFilter(dbName,
+            tblName, "p3 >= \"p12\"", (short) -1);
+      } catch(MetaException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("Filter on int partition key", me.getMessage().contains(
+            "Filtering is supported only on partition keys of type string"));
+
+      me = null;
+      try {
+        client.listPartitionsByFilter(dbName,
+            tblName, "c1 >= \"p12\"", (short) -1);
+      } catch(MetaException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("Filter on invalid key", me.getMessage().contains(
+            "<c1> is not a partitioning key for the table"));
+
+      me = null;
+      try {
+        client.listPartitionsByFilter(dbName,
+            tblName, "c1 >= ", (short) -1);
+      } catch(MetaException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("Invalid filter string", me.getMessage().contains(
+            "Error parsing partition filter"));
+
+      me = null;
+      try {
+        client.listPartitionsByFilter("invDBName",
+            "invTableName", "p1 = \"p11\"", (short) -1);
+      } catch(NoSuchObjectException e) {
+        me = e;
+      }
+      assertNotNull(me);
+      assertTrue("NoSuchObject exception", me.getMessage().contains(
+            "database/table does not exist"));
+  }
+
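+  /** Lists partitions matching the given filter and asserts the expected count. */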
+  private void checkFilter(HiveMetaStoreClient client, String dbName,
+        String tblName, String filter, int expectedCount)
+        throws MetaException, NoSuchObjectException, TException {
+    List<Partition> partitions = client.listPartitionsByFilter(dbName,
+            tblName, filter, (short) -1);
+
+    assertEquals("Partition count expected for filter " + filter,
+            expectedCount, partitions.size());
+  }
+
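+  /** Adds a partition with the given values at the table location plus the given suffix. */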
+  private void add_partition(HiveMetaStoreClient client, Table table,
+      List<String> vals, String location) throws InvalidObjectException,
+        AlreadyExistsException, MetaException, TException {
+
+    Partition part = new Partition();
+    part.setDbName(table.getDbName());
+    part.setTableName(table.getTableName());
+    part.setValues(vals);
+    part.setParameters(new HashMap<String, String>());
+    part.setSd(table.getSd());
+    part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
+    part.getSd().setLocation(table.getSd().getLocation() + location);
+
+    client.add_partition(part);
+  }
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Tue Sep 21 21:13:02 2010
@@ -89,8 +89,8 @@ import org.apache.hadoop.hive.ql.plan.Gr
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -408,8 +408,8 @@ public final class Utilities {
     e.setPersistenceDelegate(Operator.ProgressCounter.class,
         new EnumDelegate());
 
-    e.setPersistenceDelegate(org.datanucleus.sco.backed.Map.class, new MapDelegate());
-    e.setPersistenceDelegate(org.datanucleus.sco.backed.List.class, new ListDelegate());
+    e.setPersistenceDelegate(org.datanucleus.store.types.sco.backed.Map.class, new MapDelegate());
+    e.setPersistenceDelegate(org.datanucleus.store.types.sco.backed.List.class, new ListDelegate());
 
     e.writeObject(plan);
     e.close();