Posted to commits@hive.apache.org by sd...@apache.org on 2011/10/29 02:07:12 UTC

svn commit: r1190696 [2/3] - in /hive/trunk: metastore/if/ metastore/src/gen/thrift/gen-cpp/ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/src/gen/thrift/gen-php/hive_metastore/ metastore/src/gen/thrift/gen-py/hi...
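
Taken together, the changes in this part of the commit thread a new optional
parameter, part_vals (the *old* partition key values), through every binding of
the metastore's alter_partition call. When part_vals is null or empty the call
behaves exactly as before (alter in place); when it carries the old values, the
server looks up the old partition, refuses to proceed if the target already
exists, rewrites the metadata inside a transaction, and then moves the data
directory on HDFS. Most of the churn in the regenerated PHP/Python bindings
below is mechanical: the Thrift compiler renumbers its temporaries once the new
list field is inserted (_size386 becomes _size393, iter392 becomes iter399, and
so on).

A minimal end-to-end sketch of the new client API, with table and value names
borrowed from the new unit test at the bottom of this mail (assumes a
hive-site.xml on the classpath that points at a running metastore):

  import java.util.Arrays;

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class RenamePartitionSketch {
    public static void main(String[] args) throws Exception {
      HiveMetaStoreClient msc =
          new HiveMetaStoreClient(new HiveConf(RenamePartitionSketch.class));
      // Look the partition up by its current key values (ds, hr).
      Partition part = msc.getPartition("compdb1", "comptbl1",
          Arrays.asList("2011-07-11", "8"));
      // Give it the new key values; the server recomputes the partition name
      // and relocates the data directory under the table path.
      part.setValues(Arrays.asList("2011-07-12", "9"));
      msc.renamePartition("compdb1", "comptbl1",
          Arrays.asList("2011-07-11", "8"), part);
    }
  }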

Modified: hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php Sat Oct 29 00:07:10 2011
@@ -47,7 +47,7 @@ interface ThriftHiveMetastoreIf extends 
   public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts);
   public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
   public function get_partitions_by_names($db_name, $tbl_name, $names);
-  public function alter_partition($db_name, $tbl_name, $new_part);
+  public function alter_partition($db_name, $tbl_name, $part_vals, $new_part);
   public function get_config_value($name, $defaultValue);
   public function partition_name_to_vals($part_name);
   public function partition_name_to_spec($part_name);
@@ -2240,17 +2240,18 @@ class ThriftHiveMetastoreClient extends 
     throw new Exception("get_partitions_by_names failed: unknown result");
   }
 
-  public function alter_partition($db_name, $tbl_name, $new_part)
+  public function alter_partition($db_name, $tbl_name, $part_vals, $new_part)
   {
-    $this->send_alter_partition($db_name, $tbl_name, $new_part);
+    $this->send_alter_partition($db_name, $tbl_name, $part_vals, $new_part);
     $this->recv_alter_partition();
   }
 
-  public function send_alter_partition($db_name, $tbl_name, $new_part)
+  public function send_alter_partition($db_name, $tbl_name, $part_vals, $new_part)
   {
     $args = new ThriftHiveMetastore_alter_partition_args();
     $args->db_name = $db_name;
     $args->tbl_name = $tbl_name;
+    $args->part_vals = $part_vals;
     $args->new_part = $new_part;
     $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
     if ($bin_accel)
@@ -12601,6 +12602,7 @@ class ThriftHiveMetastore_alter_partitio
 
   public $db_name = null;
   public $tbl_name = null;
+  public $part_vals = null;
   public $new_part = null;
 
   public function __construct($vals=null) {
@@ -12615,6 +12617,14 @@ class ThriftHiveMetastore_alter_partitio
           'type' => TType::STRING,
           ),
         3 => array(
+          'var' => 'part_vals',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        4 => array(
           'var' => 'new_part',
           'type' => TType::STRUCT,
           'class' => 'Partition',
@@ -12628,6 +12638,9 @@ class ThriftHiveMetastore_alter_partitio
       if (isset($vals['tbl_name'])) {
         $this->tbl_name = $vals['tbl_name'];
       }
+      if (isset($vals['part_vals'])) {
+        $this->part_vals = $vals['part_vals'];
+      }
       if (isset($vals['new_part'])) {
         $this->new_part = $vals['new_part'];
       }
@@ -12668,6 +12681,23 @@ class ThriftHiveMetastore_alter_partitio
           }
           break;
         case 3:
+          if ($ftype == TType::LST) {
+            $this->part_vals = array();
+            $_size386 = 0;
+            $_etype389 = 0;
+            $xfer += $input->readListBegin($_etype389, $_size386);
+            for ($_i390 = 0; $_i390 < $_size386; ++$_i390)
+            {
+              $elem391 = null;
+              $xfer += $input->readString($elem391);
+              $this->part_vals []= $elem391;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
           if ($ftype == TType::STRUCT) {
             $this->new_part = new Partition();
             $xfer += $this->new_part->read($input);
@@ -12698,11 +12728,28 @@ class ThriftHiveMetastore_alter_partitio
       $xfer += $output->writeString($this->tbl_name);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->part_vals !== null) {
+      if (!is_array($this->part_vals)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3);
+      {
+        $output->writeListBegin(TType::STRING, count($this->part_vals));
+        {
+          foreach ($this->part_vals as $iter392)
+          {
+            $xfer += $output->writeString($iter392);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
     if ($this->new_part !== null) {
       if (!is_object($this->new_part)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
       }
-      $xfer += $output->writeFieldBegin('new_part', TType::STRUCT, 3);
+      $xfer += $output->writeFieldBegin('new_part', TType::STRUCT, 4);
       $xfer += $this->new_part->write($output);
       $xfer += $output->writeFieldEnd();
     }
@@ -13123,14 +13170,14 @@ class ThriftHiveMetastore_partition_name
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size386 = 0;
-            $_etype389 = 0;
-            $xfer += $input->readListBegin($_etype389, $_size386);
-            for ($_i390 = 0; $_i390 < $_size386; ++$_i390)
+            $_size393 = 0;
+            $_etype396 = 0;
+            $xfer += $input->readListBegin($_etype396, $_size393);
+            for ($_i397 = 0; $_i397 < $_size393; ++$_i397)
             {
-              $elem391 = null;
-              $xfer += $input->readString($elem391);
-              $this->success []= $elem391;
+              $elem398 = null;
+              $xfer += $input->readString($elem398);
+              $this->success []= $elem398;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -13166,9 +13213,9 @@ class ThriftHiveMetastore_partition_name
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter392)
+          foreach ($this->success as $iter399)
           {
-            $xfer += $output->writeString($iter392);
+            $xfer += $output->writeString($iter399);
           }
         }
         $output->writeListEnd();
@@ -13319,17 +13366,17 @@ class ThriftHiveMetastore_partition_name
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size393 = 0;
-            $_ktype394 = 0;
-            $_vtype395 = 0;
-            $xfer += $input->readMapBegin($_ktype394, $_vtype395, $_size393);
-            for ($_i397 = 0; $_i397 < $_size393; ++$_i397)
+            $_size400 = 0;
+            $_ktype401 = 0;
+            $_vtype402 = 0;
+            $xfer += $input->readMapBegin($_ktype401, $_vtype402, $_size400);
+            for ($_i404 = 0; $_i404 < $_size400; ++$_i404)
             {
-              $key398 = '';
-              $val399 = '';
-              $xfer += $input->readString($key398);
-              $xfer += $input->readString($val399);
-              $this->success[$key398] = $val399;
+              $key405 = '';
+              $val406 = '';
+              $xfer += $input->readString($key405);
+              $xfer += $input->readString($val406);
+              $this->success[$key405] = $val406;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -13365,10 +13412,10 @@ class ThriftHiveMetastore_partition_name
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
         {
-          foreach ($this->success as $kiter400 => $viter401)
+          foreach ($this->success as $kiter407 => $viter408)
           {
-            $xfer += $output->writeString($kiter400);
-            $xfer += $output->writeString($viter401);
+            $xfer += $output->writeString($kiter407);
+            $xfer += $output->writeString($viter408);
           }
         }
         $output->writeMapEnd();
@@ -13476,17 +13523,17 @@ class ThriftHiveMetastore_markPartitionF
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size402 = 0;
-            $_ktype403 = 0;
-            $_vtype404 = 0;
-            $xfer += $input->readMapBegin($_ktype403, $_vtype404, $_size402);
-            for ($_i406 = 0; $_i406 < $_size402; ++$_i406)
+            $_size409 = 0;
+            $_ktype410 = 0;
+            $_vtype411 = 0;
+            $xfer += $input->readMapBegin($_ktype410, $_vtype411, $_size409);
+            for ($_i413 = 0; $_i413 < $_size409; ++$_i413)
             {
-              $key407 = '';
-              $val408 = '';
-              $xfer += $input->readString($key407);
-              $xfer += $input->readString($val408);
-              $this->part_vals[$key407] = $val408;
+              $key414 = '';
+              $val415 = '';
+              $xfer += $input->readString($key414);
+              $xfer += $input->readString($val415);
+              $this->part_vals[$key414] = $val415;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -13531,10 +13578,10 @@ class ThriftHiveMetastore_markPartitionF
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter409 => $viter410)
+          foreach ($this->part_vals as $kiter416 => $viter417)
           {
-            $xfer += $output->writeString($kiter409);
-            $xfer += $output->writeString($viter410);
+            $xfer += $output->writeString($kiter416);
+            $xfer += $output->writeString($viter417);
           }
         }
         $output->writeMapEnd();
@@ -13826,17 +13873,17 @@ class ThriftHiveMetastore_isPartitionMar
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size411 = 0;
-            $_ktype412 = 0;
-            $_vtype413 = 0;
-            $xfer += $input->readMapBegin($_ktype412, $_vtype413, $_size411);
-            for ($_i415 = 0; $_i415 < $_size411; ++$_i415)
+            $_size418 = 0;
+            $_ktype419 = 0;
+            $_vtype420 = 0;
+            $xfer += $input->readMapBegin($_ktype419, $_vtype420, $_size418);
+            for ($_i422 = 0; $_i422 < $_size418; ++$_i422)
             {
-              $key416 = '';
-              $val417 = '';
-              $xfer += $input->readString($key416);
-              $xfer += $input->readString($val417);
-              $this->part_vals[$key416] = $val417;
+              $key423 = '';
+              $val424 = '';
+              $xfer += $input->readString($key423);
+              $xfer += $input->readString($val424);
+              $this->part_vals[$key423] = $val424;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -13881,10 +13928,10 @@ class ThriftHiveMetastore_isPartitionMar
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter418 => $viter419)
+          foreach ($this->part_vals as $kiter425 => $viter426)
           {
-            $xfer += $output->writeString($kiter418);
-            $xfer += $output->writeString($viter419);
+            $xfer += $output->writeString($kiter425);
+            $xfer += $output->writeString($viter426);
           }
         }
         $output->writeMapEnd();
@@ -15244,15 +15291,15 @@ class ThriftHiveMetastore_get_indexes_re
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size420 = 0;
-            $_etype423 = 0;
-            $xfer += $input->readListBegin($_etype423, $_size420);
-            for ($_i424 = 0; $_i424 < $_size420; ++$_i424)
+            $_size427 = 0;
+            $_etype430 = 0;
+            $xfer += $input->readListBegin($_etype430, $_size427);
+            for ($_i431 = 0; $_i431 < $_size427; ++$_i431)
             {
-              $elem425 = null;
-              $elem425 = new Index();
-              $xfer += $elem425->read($input);
-              $this->success []= $elem425;
+              $elem432 = null;
+              $elem432 = new Index();
+              $xfer += $elem432->read($input);
+              $this->success []= $elem432;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15296,9 +15343,9 @@ class ThriftHiveMetastore_get_indexes_re
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter426)
+          foreach ($this->success as $iter433)
           {
-            $xfer += $iter426->write($output);
+            $xfer += $iter433->write($output);
           }
         }
         $output->writeListEnd();
@@ -15490,14 +15537,14 @@ class ThriftHiveMetastore_get_index_name
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size427 = 0;
-            $_etype430 = 0;
-            $xfer += $input->readListBegin($_etype430, $_size427);
-            for ($_i431 = 0; $_i431 < $_size427; ++$_i431)
+            $_size434 = 0;
+            $_etype437 = 0;
+            $xfer += $input->readListBegin($_etype437, $_size434);
+            for ($_i438 = 0; $_i438 < $_size434; ++$_i438)
             {
-              $elem432 = null;
-              $xfer += $input->readString($elem432);
-              $this->success []= $elem432;
+              $elem439 = null;
+              $xfer += $input->readString($elem439);
+              $this->success []= $elem439;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15533,9 +15580,9 @@ class ThriftHiveMetastore_get_index_name
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter433)
+          foreach ($this->success as $iter440)
           {
-            $xfer += $output->writeString($iter433);
+            $xfer += $output->writeString($iter440);
           }
         }
         $output->writeListEnd();
@@ -15997,14 +16044,14 @@ class ThriftHiveMetastore_get_role_names
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size434 = 0;
-            $_etype437 = 0;
-            $xfer += $input->readListBegin($_etype437, $_size434);
-            for ($_i438 = 0; $_i438 < $_size434; ++$_i438)
+            $_size441 = 0;
+            $_etype444 = 0;
+            $xfer += $input->readListBegin($_etype444, $_size441);
+            for ($_i445 = 0; $_i445 < $_size441; ++$_i445)
             {
-              $elem439 = null;
-              $xfer += $input->readString($elem439);
-              $this->success []= $elem439;
+              $elem446 = null;
+              $xfer += $input->readString($elem446);
+              $this->success []= $elem446;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16040,9 +16087,9 @@ class ThriftHiveMetastore_get_role_names
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter440)
+          foreach ($this->success as $iter447)
           {
-            $xfer += $output->writeString($iter440);
+            $xfer += $output->writeString($iter447);
           }
         }
         $output->writeListEnd();
@@ -16682,15 +16729,15 @@ class ThriftHiveMetastore_list_roles_res
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size441 = 0;
-            $_etype444 = 0;
-            $xfer += $input->readListBegin($_etype444, $_size441);
-            for ($_i445 = 0; $_i445 < $_size441; ++$_i445)
+            $_size448 = 0;
+            $_etype451 = 0;
+            $xfer += $input->readListBegin($_etype451, $_size448);
+            for ($_i452 = 0; $_i452 < $_size448; ++$_i452)
             {
-              $elem446 = null;
-              $elem446 = new Role();
-              $xfer += $elem446->read($input);
-              $this->success []= $elem446;
+              $elem453 = null;
+              $elem453 = new Role();
+              $xfer += $elem453->read($input);
+              $this->success []= $elem453;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16726,9 +16773,9 @@ class ThriftHiveMetastore_list_roles_res
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter447)
+          foreach ($this->success as $iter454)
           {
-            $xfer += $iter447->write($output);
+            $xfer += $iter454->write($output);
           }
         }
         $output->writeListEnd();
@@ -16826,14 +16873,14 @@ class ThriftHiveMetastore_get_privilege_
         case 3:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size448 = 0;
-            $_etype451 = 0;
-            $xfer += $input->readListBegin($_etype451, $_size448);
-            for ($_i452 = 0; $_i452 < $_size448; ++$_i452)
+            $_size455 = 0;
+            $_etype458 = 0;
+            $xfer += $input->readListBegin($_etype458, $_size455);
+            for ($_i459 = 0; $_i459 < $_size455; ++$_i459)
             {
-              $elem453 = null;
-              $xfer += $input->readString($elem453);
-              $this->group_names []= $elem453;
+              $elem460 = null;
+              $xfer += $input->readString($elem460);
+              $this->group_names []= $elem460;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16874,9 +16921,9 @@ class ThriftHiveMetastore_get_privilege_
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter454)
+          foreach ($this->group_names as $iter461)
           {
-            $xfer += $output->writeString($iter454);
+            $xfer += $output->writeString($iter461);
           }
         }
         $output->writeListEnd();
@@ -17163,15 +17210,15 @@ class ThriftHiveMetastore_list_privilege
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size455 = 0;
-            $_etype458 = 0;
-            $xfer += $input->readListBegin($_etype458, $_size455);
-            for ($_i459 = 0; $_i459 < $_size455; ++$_i459)
+            $_size462 = 0;
+            $_etype465 = 0;
+            $xfer += $input->readListBegin($_etype465, $_size462);
+            for ($_i466 = 0; $_i466 < $_size462; ++$_i466)
             {
-              $elem460 = null;
-              $elem460 = new HiveObjectPrivilege();
-              $xfer += $elem460->read($input);
-              $this->success []= $elem460;
+              $elem467 = null;
+              $elem467 = new HiveObjectPrivilege();
+              $xfer += $elem467->read($input);
+              $this->success []= $elem467;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17207,9 +17254,9 @@ class ThriftHiveMetastore_list_privilege
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter461)
+          foreach ($this->success as $iter468)
           {
-            $xfer += $iter461->write($output);
+            $xfer += $iter468->write($output);
           }
         }
         $output->writeListEnd();

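On the wire, the PHP changes above amount to retyping field 3 of
alter_partition_args from STRUCT to a list of strings (part_vals) and moving
new_part to field 4; the equivalent IDL signature is alter_partition(1: string
db_name, 2: string tbl_name, 3: list<string> part_vals, 4: Partition new_part).
The Python and Ruby bindings below, and the regenerated Java stubs
(gen-javabean, in another part of this commit mail), make the same change. Note
that the renumbering means mixed old/new stubs are not interchangeable for this
one call: a pre-change client still sends new_part as field 3, which the new
reader above skips as a type mismatch.

The same call through the regenerated Java stub would look roughly like this
(a sketch; the host, port, and the existing default.page_views partition are
assumptions):

  import java.util.Arrays;

  import org.apache.hadoop.hive.metastore.api.Partition;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;

  public class AlterPartitionWireSketch {
    public static void main(String[] args) throws Exception {
      TSocket transport = new TSocket("localhost", 9083);
      transport.open();
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      Partition part = client.get_partition("default", "page_views",
          Arrays.asList("2011-07-11", "8"));
      // Leaving part_vals null keeps field 3 off the wire; the server treats
      // that as the old alter-in-place behavior.
      client.alter_partition("default", "page_views", null, part);
      transport.close();
    }
  }
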
Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote Sat Oct 29 00:07:10 2011
@@ -58,7 +58,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == 
   print '   get_partition_names_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)'
   print '   get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
   print '   get_partitions_by_names(string db_name, string tbl_name,  names)'
-  print '  void alter_partition(string db_name, string tbl_name, Partition new_part)'
+  print '  void alter_partition(string db_name, string tbl_name,  part_vals, Partition new_part)'
   print '  string get_config_value(string name, string defaultValue)'
   print '   partition_name_to_vals(string part_name)'
   print '   partition_name_to_spec(string part_name)'
@@ -356,10 +356,10 @@ elif cmd == 'get_partitions_by_names':
   pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),))
 
 elif cmd == 'alter_partition':
-  if len(args) != 3:
-    print 'alter_partition requires 3 args'
+  if len(args) != 4:
+    print 'alter_partition requires 4 args'
     sys.exit(1)
-  pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),))
+  pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),eval(args[3]),))
 
 elif cmd == 'get_config_value':
   if len(args) != 2:

Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py Sat Oct 29 00:07:10 2011
@@ -331,11 +331,12 @@ class Iface(fb303.FacebookService.Iface)
     """
     pass
 
-  def alter_partition(self, db_name, tbl_name, new_part):
+  def alter_partition(self, db_name, tbl_name, part_vals, new_part):
     """
     Parameters:
      - db_name
      - tbl_name
+     - part_vals
      - new_part
     """
     pass
@@ -1908,21 +1909,23 @@ class Client(fb303.FacebookService.Clien
       raise result.o2
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result");
 
-  def alter_partition(self, db_name, tbl_name, new_part):
+  def alter_partition(self, db_name, tbl_name, part_vals, new_part):
     """
     Parameters:
      - db_name
      - tbl_name
+     - part_vals
      - new_part
     """
-    self.send_alter_partition(db_name, tbl_name, new_part)
+    self.send_alter_partition(db_name, tbl_name, part_vals, new_part)
     self.recv_alter_partition()
 
-  def send_alter_partition(self, db_name, tbl_name, new_part):
+  def send_alter_partition(self, db_name, tbl_name, part_vals, new_part):
     self._oprot.writeMessageBegin('alter_partition', TMessageType.CALL, self._seqid)
     args = alter_partition_args()
     args.db_name = db_name
     args.tbl_name = tbl_name
+    args.part_vals = part_vals
     args.new_part = new_part
     args.write(self._oprot)
     self._oprot.writeMessageEnd()
@@ -3489,7 +3492,7 @@ class Processor(fb303.FacebookService.Pr
     iprot.readMessageEnd()
     result = alter_partition_result()
     try:
-      self._handler.alter_partition(args.db_name, args.tbl_name, args.new_part)
+      self._handler.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part)
     except InvalidOperationException, o1:
       result.o1 = o1
     except MetaException, o2:
@@ -10161,6 +10164,7 @@ class alter_partition_args:
   Attributes:
    - db_name
    - tbl_name
+   - part_vals
    - new_part
   """
 
@@ -10168,12 +10172,14 @@ class alter_partition_args:
     None, # 0
     (1, TType.STRING, 'db_name', None, None, ), # 1
     (2, TType.STRING, 'tbl_name', None, None, ), # 2
-    (3, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 3
+    (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3
+    (4, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 4
   )
 
-  def __init__(self, db_name=None, tbl_name=None, new_part=None,):
+  def __init__(self, db_name=None, tbl_name=None, part_vals=None, new_part=None,):
     self.db_name = db_name
     self.tbl_name = tbl_name
+    self.part_vals = part_vals
     self.new_part = new_part
 
   def read(self, iprot):
@@ -10196,6 +10202,16 @@ class alter_partition_args:
         else:
           iprot.skip(ftype)
       elif fid == 3:
+        if ftype == TType.LIST:
+          self.part_vals = []
+          (_etype389, _size386) = iprot.readListBegin()
+          for _i390 in xrange(_size386):
+            _elem391 = iprot.readString();
+            self.part_vals.append(_elem391)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
         if ftype == TType.STRUCT:
           self.new_part = Partition()
           self.new_part.read(iprot)
@@ -10219,8 +10235,15 @@ class alter_partition_args:
       oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
       oprot.writeString(self.tbl_name)
       oprot.writeFieldEnd()
+    if self.part_vals is not None:
+      oprot.writeFieldBegin('part_vals', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.part_vals))
+      for iter392 in self.part_vals:
+        oprot.writeString(iter392)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
     if self.new_part is not None:
-      oprot.writeFieldBegin('new_part', TType.STRUCT, 3)
+      oprot.writeFieldBegin('new_part', TType.STRUCT, 4)
       self.new_part.write(oprot)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10547,10 +10570,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype389, _size386) = iprot.readListBegin()
-          for _i390 in xrange(_size386):
-            _elem391 = iprot.readString();
-            self.success.append(_elem391)
+          (_etype396, _size393) = iprot.readListBegin()
+          for _i397 in xrange(_size393):
+            _elem398 = iprot.readString();
+            self.success.append(_elem398)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -10573,8 +10596,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter392 in self.success:
-        oprot.writeString(iter392)
+      for iter399 in self.success:
+        oprot.writeString(iter399)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -10687,11 +10710,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype394, _vtype395, _size393 ) = iprot.readMapBegin() 
-          for _i397 in xrange(_size393):
-            _key398 = iprot.readString();
-            _val399 = iprot.readString();
-            self.success[_key398] = _val399
+          (_ktype401, _vtype402, _size400 ) = iprot.readMapBegin() 
+          for _i404 in xrange(_size400):
+            _key405 = iprot.readString();
+            _val406 = iprot.readString();
+            self.success[_key405] = _val406
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -10714,9 +10737,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter400,viter401 in self.success.items():
-        oprot.writeString(kiter400)
-        oprot.writeString(viter401)
+      for kiter407,viter408 in self.success.items():
+        oprot.writeString(kiter407)
+        oprot.writeString(viter408)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -10786,11 +10809,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype403, _vtype404, _size402 ) = iprot.readMapBegin() 
-          for _i406 in xrange(_size402):
-            _key407 = iprot.readString();
-            _val408 = iprot.readString();
-            self.part_vals[_key407] = _val408
+          (_ktype410, _vtype411, _size409 ) = iprot.readMapBegin() 
+          for _i413 in xrange(_size409):
+            _key414 = iprot.readString();
+            _val415 = iprot.readString();
+            self.part_vals[_key414] = _val415
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -10820,9 +10843,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter409,viter410 in self.part_vals.items():
-        oprot.writeString(kiter409)
-        oprot.writeString(viter410)
+      for kiter416,viter417 in self.part_vals.items():
+        oprot.writeString(kiter416)
+        oprot.writeString(viter417)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -11018,11 +11041,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype412, _vtype413, _size411 ) = iprot.readMapBegin() 
-          for _i415 in xrange(_size411):
-            _key416 = iprot.readString();
-            _val417 = iprot.readString();
-            self.part_vals[_key416] = _val417
+          (_ktype419, _vtype420, _size418 ) = iprot.readMapBegin() 
+          for _i422 in xrange(_size418):
+            _key423 = iprot.readString();
+            _val424 = iprot.readString();
+            self.part_vals[_key423] = _val424
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -11052,9 +11075,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter418,viter419 in self.part_vals.items():
-        oprot.writeString(kiter418)
-        oprot.writeString(viter419)
+      for kiter425,viter426 in self.part_vals.items():
+        oprot.writeString(kiter425)
+        oprot.writeString(viter426)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -12026,11 +12049,11 @@ class get_indexes_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype423, _size420) = iprot.readListBegin()
-          for _i424 in xrange(_size420):
-            _elem425 = Index()
-            _elem425.read(iprot)
-            self.success.append(_elem425)
+          (_etype430, _size427) = iprot.readListBegin()
+          for _i431 in xrange(_size427):
+            _elem432 = Index()
+            _elem432.read(iprot)
+            self.success.append(_elem432)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12059,8 +12082,8 @@ class get_indexes_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter426 in self.success:
-        iter426.write(oprot)
+      for iter433 in self.success:
+        iter433.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -12201,10 +12224,10 @@ class get_index_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype430, _size427) = iprot.readListBegin()
-          for _i431 in xrange(_size427):
-            _elem432 = iprot.readString();
-            self.success.append(_elem432)
+          (_etype437, _size434) = iprot.readListBegin()
+          for _i438 in xrange(_size434):
+            _elem439 = iprot.readString();
+            self.success.append(_elem439)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12227,8 +12250,8 @@ class get_index_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter433 in self.success:
-        oprot.writeString(iter433)
+      for iter440 in self.success:
+        oprot.writeString(iter440)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -12588,10 +12611,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype437, _size434) = iprot.readListBegin()
-          for _i438 in xrange(_size434):
-            _elem439 = iprot.readString();
-            self.success.append(_elem439)
+          (_etype444, _size441) = iprot.readListBegin()
+          for _i445 in xrange(_size441):
+            _elem446 = iprot.readString();
+            self.success.append(_elem446)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12614,8 +12637,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter440 in self.success:
-        oprot.writeString(iter440)
+      for iter447 in self.success:
+        oprot.writeString(iter447)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13088,11 +13111,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype444, _size441) = iprot.readListBegin()
-          for _i445 in xrange(_size441):
-            _elem446 = Role()
-            _elem446.read(iprot)
-            self.success.append(_elem446)
+          (_etype451, _size448) = iprot.readListBegin()
+          for _i452 in xrange(_size448):
+            _elem453 = Role()
+            _elem453.read(iprot)
+            self.success.append(_elem453)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13115,8 +13138,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter447 in self.success:
-        iter447.write(oprot)
+      for iter454 in self.success:
+        iter454.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13184,10 +13207,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype451, _size448) = iprot.readListBegin()
-          for _i452 in xrange(_size448):
-            _elem453 = iprot.readString();
-            self.group_names.append(_elem453)
+          (_etype458, _size455) = iprot.readListBegin()
+          for _i459 in xrange(_size455):
+            _elem460 = iprot.readString();
+            self.group_names.append(_elem460)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13212,8 +13235,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter454 in self.group_names:
-        oprot.writeString(iter454)
+      for iter461 in self.group_names:
+        oprot.writeString(iter461)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -13420,11 +13443,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype458, _size455) = iprot.readListBegin()
-          for _i459 in xrange(_size455):
-            _elem460 = HiveObjectPrivilege()
-            _elem460.read(iprot)
-            self.success.append(_elem460)
+          (_etype465, _size462) = iprot.readListBegin()
+          for _i466 in xrange(_size462):
+            _elem467 = HiveObjectPrivilege()
+            _elem467.read(iprot)
+            self.success.append(_elem467)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13447,8 +13470,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter461 in self.success:
-        iter461.write(oprot)
+      for iter468 in self.success:
+        iter468.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:

Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Sat Oct 29 00:07:10 2011
@@ -640,13 +640,13 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_names failed: unknown result')
     end
 
-    def alter_partition(db_name, tbl_name, new_part)
-      send_alter_partition(db_name, tbl_name, new_part)
+    def alter_partition(db_name, tbl_name, part_vals, new_part)
+      send_alter_partition(db_name, tbl_name, part_vals, new_part)
       recv_alter_partition()
     end
 
-    def send_alter_partition(db_name, tbl_name, new_part)
-      send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :new_part => new_part)
+    def send_alter_partition(db_name, tbl_name, part_vals, new_part)
+      send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :new_part => new_part)
     end
 
     def recv_alter_partition()
@@ -1553,7 +1553,7 @@ module ThriftHiveMetastore
       args = read_args(iprot, Alter_partition_args)
       result = Alter_partition_result.new()
       begin
-        @handler.alter_partition(args.db_name, args.tbl_name, args.new_part)
+        @handler.alter_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part)
       rescue InvalidOperationException => o1
         result.o1 = o1
       rescue MetaException => o2
@@ -3307,11 +3307,13 @@ module ThriftHiveMetastore
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1
     TBL_NAME = 2
-    NEW_PART = 3
+    PART_VALS = 3
+    NEW_PART = 4
 
     FIELDS = {
       DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
       TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+      PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}},
       NEW_PART => {:type => ::Thrift::Types::STRUCT, :name => 'new_part', :class => Partition}
     }
 

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java Sat Oct 29 00:07:10 2011
@@ -172,7 +172,7 @@ public class HiveAlterHandler implements
                                       oldUri.getAuthority(),
                                       newPath);
             part.getSd().setLocation(newPartLocPath.toString());
-            msdb.alterPartition(dbname, name, part);
+            msdb.alterPartition(dbname, name, part.getValues(), part);
           }
         }
       }

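The one-line change in HiveAlterHandler above is the alter-in-place idiom under
the new four-argument RawStore API: during a table rename the partition keeps
its key values, so the same values serve as both the lookup key and the values
on the partition object. Schematically (the first call is the one in the diff;
oldValues and partWithNewValues are illustrative names for the rename case):

  // Alter in place: the key does not change.
  msdb.alterPartition(dbname, name, part.getValues(), part);
  // Rename: look up by the old values; new_part carries the new ones.
  msdb.alterPartition(dbname, name, oldValues, partWithNewValues);
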
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Sat Oct 29 00:07:10 2011
@@ -40,6 +40,7 @@ import org.apache.commons.cli.OptionBuil
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.LogUtils;
@@ -1813,46 +1814,28 @@ public class HiveMetaStore extends Thrif
       return ret;
     }
 
-    private void alter_partition_core(final RawStore ms, final String db_name,
-        final String tbl_name, final Partition new_part)
-        throws InvalidOperationException, MetaException, TException {
-      try {
-        // Set DDL time to now if not specified
-        if (new_part.getParameters() == null ||
-            new_part.getParameters().get(Constants.DDL_TIME) == null ||
-            Integer.parseInt(new_part.getParameters().get(Constants.DDL_TIME)) == 0) {
-          new_part.putToParameters(Constants.DDL_TIME, Long.toString(System
-              .currentTimeMillis() / 1000));
-        }
-        Partition oldPart = ms.getPartition(db_name, tbl_name, new_part.getValues());
-        ms.alterPartition(db_name, tbl_name, new_part);
-        for (MetaStoreEventListener listener : listeners) {
-          listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
-        }
-      } catch (InvalidObjectException e) {
-        throw new InvalidOperationException("alter is not possible");
-      } catch (NoSuchObjectException e){
-        //old partition does not exist
-        throw new InvalidOperationException("alter is not possible");
-      }
-    }
-
-    public void alter_partition(final String db_name, final String tbl_name,
-        final Partition new_part) throws InvalidOperationException, MetaException,
+    public void alter_partition(final String db_name, final String tbl_name, 
+        final List<String> part_vals, final Partition new_part)
+        throws InvalidOperationException, MetaException,
         TException {
       startTableFunction("alter_partition", db_name, tbl_name);
-      LOG.info("Partition values:" + new_part.getValues());
-
+      LOG.info("New partition values:" + new_part.getValues());
+      if (part_vals != null && part_vals.size() > 0) {
+        LOG.info("Old Partition values:" + part_vals);
+      }
+      
       try {
         executeWithRetry(new Command<Boolean>() {
           @Override
           public Boolean run(RawStore ms) throws Exception {
-            alter_partition_core(ms, db_name, tbl_name, new_part);
+            alter_partition_core(ms, db_name, tbl_name, part_vals, new_part);
             return Boolean.TRUE;
           }
         });
-      } catch (InvalidOperationException e) {
-        throw e;
+      } catch (InvalidObjectException e) {
+        throw new InvalidOperationException(e.getMessage());
+      } catch (AlreadyExistsException e) {
+        throw new InvalidOperationException(e.getMessage());
       } catch (MetaException e) {
         throw e;
       } catch (TException e) {
@@ -1866,6 +1849,149 @@ public class HiveMetaStore extends Thrif
       return;
     }
 
+    private void alter_partition_core(final RawStore ms, final String dbname, final String name, final List<String> part_vals, final Partition new_part)
+        throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+      boolean success = false;
+
+      Path srcPath = null;
+      Path destPath = null;
+      FileSystem srcFs = null;
+      FileSystem destFs = null;
+      Table tbl = null;
+      Partition oldPart = null;
+      String oldPartLoc = null;
+      String newPartLoc = null;
+      // Set DDL time to now if not specified
+      if (new_part.getParameters() == null ||
+          new_part.getParameters().get(Constants.DDL_TIME) == null ||
+          Integer.parseInt(new_part.getParameters().get(Constants.DDL_TIME)) == 0) {
+        new_part.putToParameters(Constants.DDL_TIME, Long.toString(System
+            .currentTimeMillis() / 1000));
+      }
+      //alter partition
+      if (part_vals == null || part_vals.size() == 0) {
+        try {
+          oldPart = ms.getPartition(dbname, name, new_part.getValues());
+          ms.alterPartition(dbname, name, new_part.getValues(), new_part);
+          for (MetaStoreEventListener listener : listeners) {
+            listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
+          }
+        } catch (InvalidObjectException e) {
+          throw new InvalidOperationException("alter is not possible");
+        } catch (NoSuchObjectException e){
+          //old partition does not exist
+          throw new InvalidOperationException("alter is not possible");
+        }
+        return;
+      }
+      //rename partition
+      try {
+        ms.openTransaction();
+        try {
+          oldPart = ms.getPartition(dbname, name, part_vals);
+        } catch (NoSuchObjectException e) {
+          // this means there is no existing partition
+          throw new InvalidObjectException(
+              "Unable to rename partition because old partition does not exist");
+        }
+        Partition check_part = null;
+        try {
+          check_part = ms.getPartition(dbname, name, new_part.getValues());
+        } catch(NoSuchObjectException e) {
+          // this means there is no existing partition
+          check_part = null;
+        }
+        if (check_part != null) {
+          throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." + new_part.getValues());
+        }
+        tbl = ms.getTable(dbname, name);
+        if (tbl == null) {
+          throw new InvalidObjectException(
+              "Unable to rename partition because table or database do not exist");
+        }
+        try {
+          destPath = new Path(wh.getTablePath(ms.getDatabase(dbname), name), Warehouse.makePartName(tbl.getPartitionKeys(), 
+            new_part.getValues()));
+        } catch (NoSuchObjectException e) {
+          LOG.debug(e);
+          throw new InvalidOperationException(
+              "Unable to change partition or table. Database " + dbname + " does not exist"
+                  + " Check metastore logs for detailed stack." + e.getMessage());
+        }
+        if (destPath != null) {
+          newPartLoc = destPath.toString();
+          oldPartLoc = oldPart.getSd().getLocation();
+
+          srcPath = new Path(oldPartLoc);
+
+          LOG.info("srcPath:" + oldPartLoc);
+          LOG.info("descPath:" + newPartLoc);
+          srcFs = wh.getFs(srcPath);
+          destFs = wh.getFs(destPath);
+          // check that src and dest are on the same file system
+          if (srcFs != destFs) {
+            throw new InvalidOperationException("table new location " + destPath
+                + " is on a different file system than the old location "
+                + srcPath + ". This operation is not supported");
+          }
+          try {
+            srcFs.exists(srcPath); // check that src exists and also checks
+            if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
+              throw new InvalidOperationException("New location for this table "
+                  + tbl.getDbName() + "." + tbl.getTableName()
+                  + " already exists : " + destPath);
+            }
+          } catch (IOException e) {
+            Warehouse.closeFs(srcFs);
+            Warehouse.closeFs(destFs);
+            throw new InvalidOperationException("Unable to access new location "
+                + destPath + " for partition " + tbl.getDbName() + "."
+                + tbl.getTableName() + " " + new_part.getValues());
+          }
+          new_part.getSd().setLocation(newPartLoc);
+          ms.alterPartition(dbname, name, part_vals, new_part);
+        }
+
+        success = ms.commitTransaction();
+      } finally {
+        if (!success) {
+          ms.rollbackTransaction();
+        }
+        if (success && newPartLoc.compareTo(oldPartLoc) != 0) {
+          //rename the data directory
+          try{
+            if (srcFs.exists(srcPath)) {
+              //if destPath's parent path doesn't exist, we should mkdir it
+              Path destParentPath = destPath.getParent();
+              if (!wh.mkdirs(destParentPath)) {
+                  throw new IOException("Unable to create path " + destParentPath);
+              }
+              srcFs.rename(srcPath, destPath);
+              LOG.info("rename done!");
+            }
+          } catch (IOException e) {
+            boolean revertMetaDataTransaction = false;
+            try {
+              ms.openTransaction();
+              ms.alterPartition(dbname, name, new_part.getValues(), oldPart);
+              revertMetaDataTransaction = ms.commitTransaction();
+            } catch (Exception e1) {
+              LOG.error("Reverting metadata opeation failed During HDFS operation failed", e1);
+              if (!revertMetaDataTransaction) {
+                ms.rollbackTransaction();
+              }
+            }
+            throw new InvalidOperationException("Unable to access old location "
+                + srcPath + " for partition " + tbl.getDbName() + "."
+                + tbl.getTableName() + " " + part_vals);
+          }
+        }
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
+        }
+      }
+    }
+
     public boolean create_index(Index index_def)
         throws IndexAlreadyExistsException, MetaException {
       endFunction(startFunction("create_index"));

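Two details of the rename branch above are worth calling out. First, the
destination is always derived from the table directory, wh.getTablePath(...)
joined with Warehouse.makePartName(partitionKeys, newValues), regardless of
where the old partition data lived. Second, the metadata change commits before
the HDFS rename, and if the rename then throws, the code re-alters the
partition back to oldPart to revert. A worked path computation, reusing the
handler's own variables (tbl, wh, ms, dbname, name) and the key values from
the new unit test:

  import java.util.Arrays;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.metastore.Warehouse;

  // Table partitioned by (ds string, hr int).
  String partName = Warehouse.makePartName(tbl.getPartitionKeys(),
      Arrays.asList("2011-07-12", "9"));       // "ds=2011-07-12/hr=9"
  Path destPath = new Path(wh.getTablePath(ms.getDatabase(dbname), name),
      partName);
  // e.g. hdfs://.../warehouse/compdb1.db/comptbl1/ds=2011-07-12/hr=9
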
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Sat Oct 29 00:07:10 2011
@@ -167,6 +167,22 @@ public class HiveMetaStoreClient impleme
     client.alter_table(dbname, tbl_name, new_tbl);
   }
 
+  /**
+   * @param dbname
+   * @param name
+   * @param part_vals
+   * @param newPart
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+   *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+   */
+  public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(dbname, name, part_vals, newPart);
+  }
+
   private void open() throws MetaException {
     for (URI store : metastoreUris) {
       LOG.info("Trying to connect to metastore with URI " + store);
@@ -778,7 +794,7 @@ public class HiveMetaStoreClient impleme
 
   public void alter_partition(String dbName, String tblName, Partition newPart)
       throws InvalidOperationException, MetaException, TException {
-    client.alter_partition(dbName, tblName, newPart);
+    client.alter_partition(dbName, tblName, null, newPart);
   }
 
   public void alterDatabase(String dbName, Database db)

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Sat Oct 29 00:07:10 2011
@@ -555,6 +555,27 @@ public interface IMetaStoreClient {
       throws InvalidOperationException, MetaException, TException;
 
   /**
+   * rename a partition to a new partition
+   *
+   * @param dbname
+   *          database of the old partition
+   * @param name
+   *          table name of the old partition
+   * @param part_vals
+   *          values of the old partition
+   * @param newPart
+   *          new partition
+   * @throws InvalidOperationException
+   *           if srcFs and destFs are different
+   * @throws MetaException
+   *          if error in updating metadata
+   * @throws TException
+   *          if error in communicating with metastore server
+   */
+  public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+      throws InvalidOperationException, MetaException, TException;
+
+  /**
    * @param db
    * @param tableName
    * @throws UnknownTableException

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Sat Oct 29 00:07:10 2011
@@ -1892,18 +1892,20 @@ public class ObjectStore implements RawS
     }
   }
 
-  public void alterPartition(String dbname, String name, Partition newPart)
+  public void alterPartition(String dbname, String name, List<String> part_vals, Partition newPart)
       throws InvalidObjectException, MetaException {
     boolean success = false;
     try {
       openTransaction();
       name = name.toLowerCase();
       dbname = dbname.toLowerCase();
-      MPartition oldp = getMPartition(dbname, name, newPart.getValues());
+      MPartition oldp = getMPartition(dbname, name, part_vals);
       MPartition newp = convertToMPart(newPart, false);
       if (oldp == null || newp == null) {
         throw new InvalidObjectException("partition does not exist.");
       }
+      oldp.setValues(newp.getValues());
+      oldp.setPartitionName(newp.getPartitionName());
       oldp.setParameters(newPart.getParameters());
       copyMSD(newp.getSd(), oldp.getSd());
       if (newp.getCreateTime() != oldp.getCreateTime()) {

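Rather than deleting and re-adding the partition, ObjectStore re-keys the
existing MPartition row: it is looked up by the old part_vals, given the new
values and partition name, and only then are the parameters and storage
descriptor copied over, so the rename stays a single-row update. Annotated
(the lines are the ones added in the diff):

  MPartition oldp = getMPartition(dbname, name, part_vals); // by the OLD values
  MPartition newp = convertToMPart(newPart, false);
  oldp.setValues(newp.getValues());                // adopt the new key values
  oldp.setPartitionName(newp.getPartitionName());  // e.g. "ds=2011-07-12/hr=9"
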
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Sat Oct 29 00:07:10 2011
@@ -158,7 +158,7 @@ public interface RawStore extends Config
   public abstract List<String> listPartitionNamesByFilter(String db_name,
       String tbl_name, String filter, short max_parts) throws MetaException;
 
-  public abstract void alterPartition(String db_name, String tbl_name,
+  public abstract void alterPartition(String db_name, String tbl_name, List<String> part_vals,
       Partition new_part) throws InvalidObjectException, MetaException;
 
   public abstract boolean addIndex(Index index)
@@ -303,6 +303,6 @@ public interface RawStore extends Config
   public abstract List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
       throws MetaException, InvalidObjectException;
- 
+
  public abstract long cleanupEvents();
 }

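The RawStore contract after this change: part_vals identifies the stored
partition to rewrite, while new_part supplies the replacement values,
partition name and storage descriptor, so a rename is just an alter whose old
and new values differ. A rough sketch of the call against an
ObjectStore-backed store (rawStore and newPart are hypothetical; the values
are illustrative):

    // the partition currently stored under ("2011-07-11", "8") is rewritten
    // in place with newPart's values, name, parameters and sd
    rawStore.alterPartition("default", "page_view",
        Arrays.asList("2011-07-11", "8"), newPart);
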
Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Sat Oct 29 00:07:10 2011
@@ -521,6 +521,136 @@ public abstract class TestHiveMetaStore 
     }
   }
 
+  public void testRenamePartition() throws Throwable {
+
+    try {
+      String dbName = "compdb1";
+      String tblName = "comptbl1";
+      List<String> vals = new ArrayList<String>(2);
+      vals.add("2011-07-11");
+      vals.add("8");
+      String part_path = "/ds=2011-07-11/hr=8";
+      List<String> tmp_vals = new ArrayList<String>(2);
+      tmp_vals.add("tmp_2011-07-11");
+      tmp_vals.add("-8");
+      String part2_path = "/ds=tmp_2011-07-11/hr=-8";
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Rename Partition Test database");
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+      sd.getBucketCols().add("name");
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Partition part = new Partition();
+      part.setDbName(dbName);
+      part.setTableName(tblName);
+      part.setValues(vals);
+      part.setParameters(new HashMap<String, String>());
+      part.setSd(tbl.getSd().deepCopy());
+      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+      part.getParameters().put("retention", "10");
+      part.getSd().setNumBuckets(12);
+      part.getSd().getSerdeInfo().getParameters().put("abc", "1");
+
+      client.add_partition(part);
+
+      part.setValues(tmp_vals);
+      client.renamePartition(dbName, tblName, vals, part);
+
+      boolean exceptionThrown = false;
+      try {
+        client.getPartition(dbName, tblName, vals);
+      } catch (Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part2_path);
+
+      part.setValues(vals);
+      client.renamePartition(dbName, tblName, tmp_vals, part);
+
+      exceptionThrown = false;
+      try {
+        client.getPartition(dbName, tblName, tmp_vals);
+      } catch (Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      part3 = client.getPartition(dbName, tblName, vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part_path);
+
+      client.dropTable(dbName, tblName);
+
+      client.dropDatabase(dbName);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testRenamePartition() failed.");
+      throw e;
+    }
+  }
+
   public void testDatabase() throws Throwable {
     try {
       // clear up any existing databases

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Sat Oct 29 00:07:10 2011
@@ -117,6 +117,7 @@ import org.apache.hadoop.hive.ql.plan.Ms
 import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
+import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
 import org.apache.hadoop.hive.ql.plan.RevokeDesc;
 import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
@@ -263,6 +264,11 @@ public class DDLTask extends Task<DDLWor
         return addPartition(db, addPartitionDesc);
       }
 
+      RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
+      if (renamePartitionDesc != null) {
+        return renamePartition(db, renamePartitionDesc);
+      }
+
       AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
       if (simpleDesc != null) {
         if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
@@ -969,6 +975,34 @@ public class DDLTask extends Task<DDLWor
   }
 
   /**
+   * Rename a partition in a table.
+   *
+   * @param db
+   *          the Hive database handle used to perform the rename.
+   * @param renamePartitionDesc
+   *          descriptor of the old partition and its new name.
+   * @return 0 when execution succeeds, a value above 0 when it fails.
+   * @throws HiveException
+   */
+  private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) throws HiveException {
+
+    Table tbl = db.getTable(renamePartitionDesc.getDbName(), renamePartitionDesc.getTableName());
+
+    validateAlterTableType(
+      tbl, AlterTableDesc.AlterTableTypes.RENAMEPARTITION,
+      false);
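+    // the partition is fetched twice deliberately: 'oldPart' is kept
+    // unmodified for the ReadEntity recorded below, while 'part' is updated
+    // to the new spec and passed to the rename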
+    Partition oldPart = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false);
+    Partition part = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false);
+    part.setValues(renamePartitionDesc.getNewPartSpec());
+    db.renamePartition(tbl, renamePartitionDesc.getOldPartSpec(), part);
+    Partition newPart = db
+        .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false);
+    work.getInputs().add(new ReadEntity(oldPart));
+    work.getOutputs().add(new WriteEntity(newPart));
+    return 0;
+  }
+
+  /**
    * Rewrite the partition's metadata and force the pre/post execute hooks to
    * be fired.
    *
@@ -1507,6 +1541,7 @@ public class DDLTask extends Task<DDLWor
       switch (alterType) {
       case ADDPARTITION:
       case DROPPARTITION:
+      case RENAMEPARTITION:
       case ADDPROPS:
       case RENAME:
         // allow this form

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Sat Oct 29 00:07:10 2011
@@ -63,6 +63,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -432,6 +433,53 @@ public class Hive {
     }
   }
 
+  /**
+   * Rename an old partition to a new partition.
+   *
+   * @param tbl
+   *          existing table
+   * @param oldPartSpec
+   *          spec of old partition
+   * @param newPart
+   *          new partition
+   * @throws InvalidOperationException
+   *           if the metadata changes are not acceptable
+   * @throws TException
+   */
+  public void renamePartition(Table tbl, Map<String, String> oldPartSpec, Partition newPart)
+      throws HiveException {
+    try {
+      Map<String, String> newPartSpec = newPart.getSpec();
+      if (oldPartSpec.keySet().size() != tbl.getPartCols().size()
+          || newPartSpec.keySet().size() != tbl.getPartCols().size()) {
+        throw new HiveException("Unable to rename partition to the same name: number of partition cols don't match. ");
+      }
+      if (!oldPartSpec.keySet().equals(newPartSpec.keySet())){
+        throw new HiveException("Unable to rename partition to the same name: old and new partition cols don't match. ");
+      }
+      List<String> pvals = new ArrayList<String>();
+      
+      for (FieldSchema field : tbl.getPartCols()) {
+        String val = oldPartSpec.get(field.getName());
+        if (val == null || val.length() == 0) {
+          throw new HiveException("get partition: Value for key "
+              + field.getName() + " is null or empty");
+        } else if (val != null){
+          pvals.add(val);
+        }
+      }
+      getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals,
+          newPart.getTPartition());
+
+    } catch (InvalidOperationException e){
+      throw new HiveException("Unable to rename partition.", e);
+    } catch (MetaException e) {
+      throw new HiveException("Unable to rename partition.", e);
+    } catch (TException e) {
+      throw new HiveException("Unable to rename partition.", e);
+    }
+  }
+
   public void alterDatabase(String dbName, Database db)
       throws HiveException {
     try {

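A short sketch of the same rename through the ql metadata layer (it assumes a
Hive handle obtained from Hive.get(conf) and an existing table partitioned by
ds and hr; all names and values are illustrative):

    // old and new specs must cover exactly the table's partition columns
    Map<String, String> oldSpec = new LinkedHashMap<String, String>();
    oldSpec.put("ds", "2011-07-11");
    oldSpec.put("hr", "8");
    Table tbl = db.getTable("default", "page_view");
    Partition newPart = db.getPartition(tbl, oldSpec, false);
    Map<String, String> newSpec = new LinkedHashMap<String, String>(oldSpec);
    newSpec.put("ds", "2011-07-12");
    newPart.setValues(newSpec);
    db.renamePartition(tbl, oldSpec, newPart);
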
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Sat Oct 29 00:07:10 2011
@@ -518,6 +518,28 @@ public class Partition implements Serial
   }
 
   /**
+   * Set the partition's values from a partition spec.
+   *
+   * @param partSpec
+   *          the partition specification (partition column name to value).
+   * @throws HiveException
+   *           Thrown if the spec does not cover all of the table's partition columns.
+   */
+  public void setValues(Map<String, String> partSpec) 
+      throws HiveException {
+    List<String> pvals = new ArrayList<String>();
+    for (FieldSchema field : table.getPartCols()) {
+      String val = partSpec.get(field.getName());
+      if (val == null) {
+        throw new HiveException("partition spec is invalid; field "
+            + field.getName() + " does not exist in the input");
+      }
+      pvals.add(val);
+    }
+    tPartition.setValues(pvals);
+  }
+
+  /**
    * @param protectMode
    */
   public void setProtectMode(ProtectMode protectMode){

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Sat Oct 29 00:07:10 2011
@@ -93,6 +93,7 @@ import org.apache.hadoop.hive.ql.plan.Ms
 import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
+import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
 import org.apache.hadoop.hive.ql.plan.RevokeDesc;
 import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
@@ -199,6 +200,8 @@ public class DDLSemanticAnalyzer extends
         analyzeAlterTableSerde(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) {
         analyzeAlterTableSerdeProps(ast, tableName, partSpec);
+      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
+        analyzeAlterTableRenamePart(ast, tableName, partSpec);
       }
       break;
     }
@@ -1685,6 +1688,32 @@ public class DDLSemanticAnalyzer extends
         alterTblDesc), conf));
   }
 
+  private void analyzeAlterTableRenamePart(ASTNode ast, String tblName,
+      HashMap<String, String> oldPartSpec) throws SemanticException {
+    Map<String, String> newPartSpec = extractPartitionSpecs((ASTNode)ast.getChild(0));
+    if (newPartSpec == null) {
+      throw new SemanticException("RENAME PARTITION: missing destination partition spec in " + ast);
+    }
+    try {
+      Table tab = db.getTable(db.getCurrentDatabase(), tblName, false);
+      if (tab != null) {
+        inputs.add(new ReadEntity(tab));
+      } else {
+        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+      }
+    } catch (HiveException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+    }
+    List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
+    partSpecs.add(oldPartSpec);
+    partSpecs.add(newPartSpec);
+    addTablePartsOutputs(tblName, partSpecs);
+    RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc(
+        db.getCurrentDatabase(), tblName, oldPartSpec, newPartSpec);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+        renamePartitionDesc), conf));
+  }
+
   private void analyzeAlterTableModifyCols(ASTNode ast,
       AlterTableTypes alterType) throws SemanticException {
     String tblName = getUnescapedName((ASTNode)ast.getChild(0));

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g Sat Oct 29 00:07:10 2011
@@ -121,6 +121,7 @@ TOK_ALTERTABLE_PARTITION;
 TOK_ALTERTABLE_RENAME;
 TOK_ALTERTABLE_ADDCOLS;
 TOK_ALTERTABLE_RENAMECOL;
+TOK_ALTERTABLE_RENAMEPART;
 TOK_ALTERTABLE_REPLACECOLS;
 TOK_ALTERTABLE_ADDPARTS;
 TOK_ALTERTABLE_DROPPARTS;
@@ -638,6 +639,7 @@ alterStatementSuffixRenameCol
     : Identifier KW_CHANGE KW_COLUMN? oldName=Identifier newName=Identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition?
     ->^(TOK_ALTERTABLE_RENAMECOL Identifier $oldName $newName colType $comment? alterStatementChangeColPosition?)
     ;
+    
 
 alterStatementChangeColPosition
     : first=KW_FIRST|KW_AFTER afterCol=Identifier
@@ -732,6 +734,7 @@ alterTblPartitionStatementSuffix
   | alterStatementSuffixProtectMode
   | alterStatementSuffixMergeFiles
   | alterStatementSuffixSerdeProperties
+  | alterStatementSuffixRenamePart
   ;
 
 alterStatementSuffixFileFormat
@@ -755,6 +758,13 @@ alterStatementSuffixProtectMode
     -> ^(TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE alterProtectMode)
     ;
 
+alterStatementSuffixRenamePart
+@init { msgs.push("alter table rename partition statement"); }
+@after { msgs.pop(); }
+    : KW_RENAME KW_TO partitionSpec
+    ->^(TOK_ALTERTABLE_RENAMEPART partitionSpec)
+    ;
+
 alterStatementSuffixMergeFiles
 @init { msgs.push(""); }
 @after { msgs.pop(); }

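The new suffix hangs off alterTblPartitionStatementSuffix, so the surface
syntax is ALTER TABLE <tbl> PARTITION (<old spec>) RENAME TO PARTITION (<new
spec>). A rough sketch of issuing such a statement programmatically (it
assumes a local metastore and an existing partitioned table; the table name
and spec values are made up):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class RenamePartitionSql {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf(SessionState.class);
        SessionState.start(new SessionState(conf));
        // the statement below exercises the new TOK_ALTERTABLE_RENAMEPART path
        new Driver(conf).run("ALTER TABLE page_view PARTITION (ds='2011-07-11', hr='8') "
            + "RENAME TO PARTITION (ds='2011-07-12', hr='8')");
      }
    }
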
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Sat Oct 29 00:07:10 2011
@@ -110,6 +110,8 @@ public final class SemanticAnalyzerFacto
     tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES,
         new HiveOperation[] {HiveOperation.ALTERTABLE_SERDEPROPERTIES,
             HiveOperation.ALTERPARTITION_SERDEPROPERTIES });
+    tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART,
+        new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART});
   }
 
   public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree)

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java Sat Oct 29 00:07:10 2011
@@ -43,7 +43,7 @@ public class AlterTableDesc extends DDLD
     RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS,
     ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION,
     TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE,
-    ALTERLOCATION, DROPPARTITION
+    ALTERLOCATION, DROPPARTITION, RENAMEPARTITION
   };
 
   public static enum ProtectModeType {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java Sat Oct 29 00:07:10 2011
@@ -53,6 +53,7 @@ public class DDLWork implements Serializ
   private ShowPartitionsDesc showPartsDesc;
   private DescTableDesc descTblDesc;
   private AddPartitionDesc addPartitionDesc;
+  private RenamePartitionDesc renamePartitionDesc;
   private AlterTableSimpleDesc alterTblSimpleDesc;
   private MsckDesc msckDesc;
   private ShowTableStatusDesc showTblStatusDesc;
@@ -67,7 +68,7 @@ public class DDLWork implements Serializ
   private GrantRevokeRoleDDL grantRevokeRoleDDL;
 
   boolean needLock = false;
-  
+
   /**
    * ReadEntitites that are passed to the hooks.
    */
@@ -310,6 +311,17 @@ public class DDLWork implements Serializ
   }
 
   /**
+   * @param renamePartitionDesc
+   *          information about the partition we want to rename.
+   */
+  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      RenamePartitionDesc renamePartitionDesc) {
+    this(inputs, outputs);
+
+    this.renamePartitionDesc = renamePartitionDesc;
+  }
+
+  /**
    * @param touchDesc
    *          information about the table/partitions that we want to touch
    */
@@ -713,6 +725,21 @@ public class DDLWork implements Serializ
   }
 
   /**
+   * @return information about the partitions we want to rename.
+   */
+  public RenamePartitionDesc getRenamePartitionDesc() {
+    return renamePartitionDesc;
+  }
+
+  /**
+   * @param renamePartitionDesc
+   *          information about the partitions we want to rename.
+   */
+  public void setRenamePartitionDesc(RenamePartitionDesc renamePartitionDesc) {
+    this.renamePartitionDesc = renamePartitionDesc;
+  }
+
+  /**
    * @return information about the table/partitions we want to alter.
    */
   public AlterTableSimpleDesc getAlterTblSimpleDesc() {
@@ -806,7 +833,7 @@ public class DDLWork implements Serializ
   public void setRoleDDLDesc(RoleDDLDesc roleDDLDesc) {
     this.roleDDLDesc = roleDDLDesc;
   }
-  
+
   /**
    * @return grant desc
    */
@@ -820,7 +847,7 @@ public class DDLWork implements Serializ
   public void setGrantDesc(GrantDesc grantDesc) {
     this.grantDesc = grantDesc;
   }
-  
+
   /**
    * @return show grant desc
    */
@@ -842,7 +869,7 @@ public class DDLWork implements Serializ
   public void setRevokeDesc(RevokeDesc revokeDesc) {
     this.revokeDesc = revokeDesc;
   }
-  
+
   /**
    * @return
    */
@@ -856,7 +883,7 @@ public class DDLWork implements Serializ
   public void setGrantRevokeRoleDDL(GrantRevokeRoleDDL grantRevokeRoleDDL) {
     this.grantRevokeRoleDDL = grantRevokeRoleDDL;
   }
-  
+
   public void setAlterDatabaseDesc(AlterDatabaseDesc alterDbDesc) {
     this.alterDbDesc = alterDbDesc;
   }
@@ -864,7 +891,7 @@ public class DDLWork implements Serializ
   public AlterDatabaseDesc getAlterDatabaseDesc() {
     return this.alterDbDesc;
   }
-  
+
   /**
    * @return descriptor for merging files
    */

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java?rev=1190696&r1=1190695&r2=1190696&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java Sat Oct 29 00:07:10 2011
@@ -21,7 +21,7 @@ package org.apache.hadoop.hive.ql.plan;
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 
 public enum HiveOperation {
-  
+
   EXPLAIN("EXPLAIN", null, null),
   LOAD("LOAD", null, new Privilege[]{Privilege.ALTER_DATA}),
   EXPORT("EXPORT", new Privilege[]{Privilege.SELECT}, null),
@@ -36,6 +36,7 @@ public enum HiveOperation {
   ALTERTABLE_ADDCOLS("ALTERTABLE_ADDCOLS", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_REPLACECOLS("ALTERTABLE_REPLACECOLS", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_RENAMECOL("ALTERTABLE_RENAMECOL", new Privilege[]{Privilege.ALTER_METADATA}, null),
+  ALTERTABLE_RENAMEPART("ALTERTABLE_RENAMEPART", new Privilege[]{Privilege.DROP}, new Privilege[]{Privilege.CREATE}),
   ALTERTABLE_RENAME("ALTERTABLE_RENAME", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_DROPPARTS("ALTERTABLE_DROPPARTS", new Privilege[]{Privilege.DROP}, null),
   ALTERTABLE_ADDPARTS("ALTERTABLE_ADDPARTS", new Privilege[]{Privilege.CREATE}, null),
@@ -81,20 +82,20 @@ public enum HiveOperation {
   ALTERPARTITION_LOCATION("ALTERPARTITION_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null),
   CREATETABLE("CREATETABLE", null, new Privilege[]{Privilege.CREATE}),
   CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}),
-  QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}), 
-  ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null), 
-  ALTERDATABASE("ALTERDATABASE", null, null), 
-  DESCDATABASE("DESCDATABASE", null, null), 
+  QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}),
+  ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null),
+  ALTERDATABASE("ALTERDATABASE", null, null),
+  DESCDATABASE("DESCDATABASE", null, null),
   ALTERTABLE_MERGEFILES("ALTER_TABLE_MERGE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }),
   ALTERPARTITION_MERGEFILES("ALTER_PARTITION_MERGE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }),
   ;
 
   private String operationName;
-  
+
   private Privilege[] inputRequiredPrivileges;
-  
+
   private Privilege[] outputRequiredPrivileges;
-  
+
   public Privilege[] getInputRequiredPrivileges() {
     return inputRequiredPrivileges;
   }
@@ -113,9 +114,9 @@ public enum HiveOperation {
     this.inputRequiredPrivileges = inputRequiredPrivileges;
     this.outputRequiredPrivileges = outputRequiredPrivileges;
   }
-  
+
   public static class PrivilegeAgreement {
-    
+
     private Privilege[] inputUserLevelRequiredPriv;
     private Privilege[] inputDBLevelRequiredPriv;
     private Privilege[] inputTableLevelRequiredPriv;
@@ -124,7 +125,7 @@ public enum HiveOperation {
     private Privilege[] outputDBLevelRequiredPriv;
     private Privilege[] outputTableLevelRequiredPriv;
     private Privilege[] outputColumnLevelRequiredPriv;
-    
+
     public PrivilegeAgreement putUserLevelRequiredPriv(
         Privilege[] inputUserLevelRequiredPriv,
         Privilege[] outputUserLevelRequiredPriv) {
@@ -140,7 +141,7 @@ public enum HiveOperation {
       this.outputDBLevelRequiredPriv = outputDBLevelRequiredPriv;
       return this;
     }
-    
+
     public PrivilegeAgreement putTableLevelRequiredPriv(
         Privilege[] inputTableLevelRequiredPriv,
         Privilege[] outputTableLevelRequiredPriv) {

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java?rev=1190696&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java Sat Oct 29 00:07:10 2011
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Contains the information needed to rename a partition.
+ */
+public class RenamePartitionDesc extends DDLDesc implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String tableName;
+  String dbName;
+  String location;
+  LinkedHashMap<String, String> oldPartSpec;
+  LinkedHashMap<String, String> newPartSpec;
+
+  /**
+   * For serialization only.
+   */
+  public RenamePartitionDesc() {
+  }
+
+  /**
+   * @param dbName
+   *          database containing the table.
+   * @param tableName
+   *          table whose partition is being renamed.
+   * @param oldPartSpec
+   *          old partition specification.
+   * @param newPartSpec
+   *          new partition specification.
+   */
+  public RenamePartitionDesc(String dbName, String tableName,
+      Map<String, String> oldPartSpec, Map<String, String> newPartSpec) {
+    super();
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.oldPartSpec = new LinkedHashMap<String,String>(oldPartSpec);
+    this.newPartSpec = new LinkedHashMap<String,String>(newPartSpec);
+  }
+
+  /**
+   * @return database name
+   */
+  public String getDbName() {
+    return dbName;
+  }
+
+  /**
+   * @param dbName
+   *          database name
+   */
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  /**
+   * @return the table whose partition is being renamed.
+   */
+  public String getTableName() {
+    return tableName;
+  }
+
+  /**
+   * @param tableName
+   *          the table whose partition is being renamed.
+   */
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  /**
+   * @return location of partition in relation to table
+   */
+  public String getLocation() {
+    return location;
+  }
+
+  /**
+   * @param location
+   *          location of partition in relation to table
+   */
+  public void setLocation(String location) {
+    this.location = location;
+  }
+
+  /**
+   * @return old partition specification.
+   */
+  public LinkedHashMap<String, String> getOldPartSpec() {
+    return oldPartSpec;
+  }
+
+  /**
+   * @param partSpec
+   *          partition specification
+   */
+  public void setOldPartSpec(LinkedHashMap<String, String> partSpec) {
+    this.oldPartSpec = partSpec;
+  }
+
+  /**
+   * @return new partition specification.
+   */
+  public LinkedHashMap<String, String> getNewPartSpec() {
+    return newPartSpec;
+  }
+
+  /**
+   * @param partSpec
+   *          partition specification
+   */
+  public void setNewPartSpec(LinkedHashMap<String, String> partSpec) {
+    this.newPartSpec = partSpec;
+  }
+}

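Echoing the DDLSemanticAnalyzer hunk above, a sketch of how the descriptor is
wired into a DDL task (the oldSpec/newSpec values are illustrative; inputs,
outputs, rootTasks and conf are the analyzer's usual fields):

    // the descriptor carries everything DDLTask.renamePartition() needs
    Map<String, String> oldSpec = new LinkedHashMap<String, String>();
    oldSpec.put("ds", "2011-07-11");
    Map<String, String> newSpec = new LinkedHashMap<String, String>();
    newSpec.put("ds", "2011-07-12");
    RenamePartitionDesc desc = new RenamePartitionDesc(
        db.getCurrentDatabase(), "page_view", oldSpec, newSpec);
    rootTasks.add(TaskFactory.get(
        new DDLWork(getInputs(), getOutputs(), desc), conf));
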
Added: hive/trunk/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q?rev=1190696&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q Sat Oct 29 00:07:10 2011
@@ -0,0 +1,6 @@
+create table alter_rename_partition_src ( col1 string ) stored as textfile ;
+load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ;
+create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
+insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ;
+
+alter table alter_rename_partition partition (pCol1='nonexist_part1:', pcol2='nonexist_part2:') rename to partition (pCol1='new_part1:', pcol2='new_part2:');