Posted to commits@hive.apache.org by na...@apache.org on 2012/09/16 19:17:17 UTC
svn commit: r1385320 [3/4] - in /hive/trunk: metastore/if/
metastore/src/gen/thrift/gen-cpp/
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/
metastore/src/gen/thrift/gen-php/hive_metastore/
metastore/src/gen/thrift/gen-py/hi...
Modified: hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php Sun Sep 16 17:17:16 2012
@@ -51,6 +51,7 @@ interface ThriftHiveMetastoreIf extends
public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
public function get_partitions_by_names($db_name, $tbl_name, $names);
public function alter_partition($db_name, $tbl_name, $new_part);
+ public function alter_partitions($db_name, $tbl_name, $new_parts);
public function alter_partition_with_environment_context($db_name, $tbl_name, $new_part, $environment_context);
public function rename_partition($db_name, $tbl_name, $part_vals, $new_part);
public function get_config_value($name, $defaultValue);
@@ -2487,6 +2488,62 @@ class ThriftHiveMetastoreClient extends
return;
}
+ public function alter_partitions($db_name, $tbl_name, $new_parts)
+ {
+ $this->send_alter_partitions($db_name, $tbl_name, $new_parts);
+ $this->recv_alter_partitions();
+ }
+
+ public function send_alter_partitions($db_name, $tbl_name, $new_parts)
+ {
+ $args = new ThriftHiveMetastore_alter_partitions_args();
+ $args->db_name = $db_name;
+ $args->tbl_name = $tbl_name;
+ $args->new_parts = $new_parts;
+ $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'alter_partitions', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('alter_partitions', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_alter_partitions()
+ {
+ $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_alter_partitions_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new ThriftHiveMetastore_alter_partitions_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ return;
+ }
+
public function alter_partition_with_environment_context($db_name, $tbl_name, $new_part, $environment_context)
{
$this->send_alter_partition_with_environment_context($db_name, $tbl_name, $new_part, $environment_context);
@@ -13938,6 +13995,242 @@ class ThriftHiveMetastore_alter_partitio
}
+class ThriftHiveMetastore_alter_partitions_args {
+ static $_TSPEC;
+
+ public $db_name = null;
+ public $tbl_name = null;
+ public $new_parts = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'db_name',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tbl_name',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'new_parts',
+ 'type' => TType::LST,
+ 'etype' => TType::STRUCT,
+ 'elem' => array(
+ 'type' => TType::STRUCT,
+ 'class' => 'Partition',
+ ),
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['db_name'])) {
+ $this->db_name = $vals['db_name'];
+ }
+ if (isset($vals['tbl_name'])) {
+ $this->tbl_name = $vals['tbl_name'];
+ }
+ if (isset($vals['new_parts'])) {
+ $this->new_parts = $vals['new_parts'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_alter_partitions_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->db_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tbl_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::LST) {
+ $this->new_parts = array();
+ $_size432 = 0;
+ $_etype435 = 0;
+ $xfer += $input->readListBegin($_etype435, $_size432);
+ for ($_i436 = 0; $_i436 < $_size432; ++$_i436)
+ {
+ $elem437 = null;
+ $elem437 = new Partition();
+ $xfer += $elem437->read($input);
+ $this->new_parts []= $elem437;
+ }
+ $xfer += $input->readListEnd();
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_args');
+ if ($this->db_name !== null) {
+ $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
+ $xfer += $output->writeString($this->db_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tbl_name !== null) {
+ $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
+ $xfer += $output->writeString($this->tbl_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->new_parts !== null) {
+ if (!is_array($this->new_parts)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('new_parts', TType::LST, 3);
+ {
+ $output->writeListBegin(TType::STRUCT, count($this->new_parts));
+ {
+ foreach ($this->new_parts as $iter438)
+ {
+ $xfer += $iter438->write($output);
+ }
+ }
+ $output->writeListEnd();
+ }
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_alter_partitions_result {
+ static $_TSPEC;
+
+ public $o1 = null;
+ public $o2 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => 'InvalidOperationException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => 'MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_alter_partitions_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new InvalidOperationException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new MetaException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_result');
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class ThriftHiveMetastore_alter_partition_with_environment_context_args {
static $_TSPEC;
@@ -14262,14 +14555,14 @@ class ThriftHiveMetastore_rename_partiti
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size432 = 0;
- $_etype435 = 0;
- $xfer += $input->readListBegin($_etype435, $_size432);
- for ($_i436 = 0; $_i436 < $_size432; ++$_i436)
+ $_size439 = 0;
+ $_etype442 = 0;
+ $xfer += $input->readListBegin($_etype442, $_size439);
+ for ($_i443 = 0; $_i443 < $_size439; ++$_i443)
{
- $elem437 = null;
- $xfer += $input->readString($elem437);
- $this->part_vals []= $elem437;
+ $elem444 = null;
+ $xfer += $input->readString($elem444);
+ $this->part_vals []= $elem444;
}
$xfer += $input->readListEnd();
} else {
@@ -14315,9 +14608,9 @@ class ThriftHiveMetastore_rename_partiti
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter438)
+ foreach ($this->part_vals as $iter445)
{
- $xfer += $output->writeString($iter438);
+ $xfer += $output->writeString($iter445);
}
}
$output->writeListEnd();
@@ -14749,14 +15042,14 @@ class ThriftHiveMetastore_partition_name
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size439 = 0;
- $_etype442 = 0;
- $xfer += $input->readListBegin($_etype442, $_size439);
- for ($_i443 = 0; $_i443 < $_size439; ++$_i443)
+ $_size446 = 0;
+ $_etype449 = 0;
+ $xfer += $input->readListBegin($_etype449, $_size446);
+ for ($_i450 = 0; $_i450 < $_size446; ++$_i450)
{
- $elem444 = null;
- $xfer += $input->readString($elem444);
- $this->success []= $elem444;
+ $elem451 = null;
+ $xfer += $input->readString($elem451);
+ $this->success []= $elem451;
}
$xfer += $input->readListEnd();
} else {
@@ -14792,9 +15085,9 @@ class ThriftHiveMetastore_partition_name
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter445)
+ foreach ($this->success as $iter452)
{
- $xfer += $output->writeString($iter445);
+ $xfer += $output->writeString($iter452);
}
}
$output->writeListEnd();
@@ -14945,17 +15238,17 @@ class ThriftHiveMetastore_partition_name
case 0:
if ($ftype == TType::MAP) {
$this->success = array();
- $_size446 = 0;
- $_ktype447 = 0;
- $_vtype448 = 0;
- $xfer += $input->readMapBegin($_ktype447, $_vtype448, $_size446);
- for ($_i450 = 0; $_i450 < $_size446; ++$_i450)
+ $_size453 = 0;
+ $_ktype454 = 0;
+ $_vtype455 = 0;
+ $xfer += $input->readMapBegin($_ktype454, $_vtype455, $_size453);
+ for ($_i457 = 0; $_i457 < $_size453; ++$_i457)
{
- $key451 = '';
- $val452 = '';
- $xfer += $input->readString($key451);
- $xfer += $input->readString($val452);
- $this->success[$key451] = $val452;
+ $key458 = '';
+ $val459 = '';
+ $xfer += $input->readString($key458);
+ $xfer += $input->readString($val459);
+ $this->success[$key458] = $val459;
}
$xfer += $input->readMapEnd();
} else {
@@ -14991,10 +15284,10 @@ class ThriftHiveMetastore_partition_name
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
{
- foreach ($this->success as $kiter453 => $viter454)
+ foreach ($this->success as $kiter460 => $viter461)
{
- $xfer += $output->writeString($kiter453);
- $xfer += $output->writeString($viter454);
+ $xfer += $output->writeString($kiter460);
+ $xfer += $output->writeString($viter461);
}
}
$output->writeMapEnd();
@@ -15102,17 +15395,17 @@ class ThriftHiveMetastore_markPartitionF
case 3:
if ($ftype == TType::MAP) {
$this->part_vals = array();
- $_size455 = 0;
- $_ktype456 = 0;
- $_vtype457 = 0;
- $xfer += $input->readMapBegin($_ktype456, $_vtype457, $_size455);
- for ($_i459 = 0; $_i459 < $_size455; ++$_i459)
+ $_size462 = 0;
+ $_ktype463 = 0;
+ $_vtype464 = 0;
+ $xfer += $input->readMapBegin($_ktype463, $_vtype464, $_size462);
+ for ($_i466 = 0; $_i466 < $_size462; ++$_i466)
{
- $key460 = '';
- $val461 = '';
- $xfer += $input->readString($key460);
- $xfer += $input->readString($val461);
- $this->part_vals[$key460] = $val461;
+ $key467 = '';
+ $val468 = '';
+ $xfer += $input->readString($key467);
+ $xfer += $input->readString($val468);
+ $this->part_vals[$key467] = $val468;
}
$xfer += $input->readMapEnd();
} else {
@@ -15157,10 +15450,10 @@ class ThriftHiveMetastore_markPartitionF
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $kiter462 => $viter463)
+ foreach ($this->part_vals as $kiter469 => $viter470)
{
- $xfer += $output->writeString($kiter462);
- $xfer += $output->writeString($viter463);
+ $xfer += $output->writeString($kiter469);
+ $xfer += $output->writeString($viter470);
}
}
$output->writeMapEnd();
@@ -15452,17 +15745,17 @@ class ThriftHiveMetastore_isPartitionMar
case 3:
if ($ftype == TType::MAP) {
$this->part_vals = array();
- $_size464 = 0;
- $_ktype465 = 0;
- $_vtype466 = 0;
- $xfer += $input->readMapBegin($_ktype465, $_vtype466, $_size464);
- for ($_i468 = 0; $_i468 < $_size464; ++$_i468)
+ $_size471 = 0;
+ $_ktype472 = 0;
+ $_vtype473 = 0;
+ $xfer += $input->readMapBegin($_ktype472, $_vtype473, $_size471);
+ for ($_i475 = 0; $_i475 < $_size471; ++$_i475)
{
- $key469 = '';
- $val470 = '';
- $xfer += $input->readString($key469);
- $xfer += $input->readString($val470);
- $this->part_vals[$key469] = $val470;
+ $key476 = '';
+ $val477 = '';
+ $xfer += $input->readString($key476);
+ $xfer += $input->readString($val477);
+ $this->part_vals[$key476] = $val477;
}
$xfer += $input->readMapEnd();
} else {
@@ -15507,10 +15800,10 @@ class ThriftHiveMetastore_isPartitionMar
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $kiter471 => $viter472)
+ foreach ($this->part_vals as $kiter478 => $viter479)
{
- $xfer += $output->writeString($kiter471);
- $xfer += $output->writeString($viter472);
+ $xfer += $output->writeString($kiter478);
+ $xfer += $output->writeString($viter479);
}
}
$output->writeMapEnd();
@@ -16870,15 +17163,15 @@ class ThriftHiveMetastore_get_indexes_re
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size473 = 0;
- $_etype476 = 0;
- $xfer += $input->readListBegin($_etype476, $_size473);
- for ($_i477 = 0; $_i477 < $_size473; ++$_i477)
+ $_size480 = 0;
+ $_etype483 = 0;
+ $xfer += $input->readListBegin($_etype483, $_size480);
+ for ($_i484 = 0; $_i484 < $_size480; ++$_i484)
{
- $elem478 = null;
- $elem478 = new Index();
- $xfer += $elem478->read($input);
- $this->success []= $elem478;
+ $elem485 = null;
+ $elem485 = new Index();
+ $xfer += $elem485->read($input);
+ $this->success []= $elem485;
}
$xfer += $input->readListEnd();
} else {
@@ -16922,9 +17215,9 @@ class ThriftHiveMetastore_get_indexes_re
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter479)
+ foreach ($this->success as $iter486)
{
- $xfer += $iter479->write($output);
+ $xfer += $iter486->write($output);
}
}
$output->writeListEnd();
@@ -17116,14 +17409,14 @@ class ThriftHiveMetastore_get_index_name
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size480 = 0;
- $_etype483 = 0;
- $xfer += $input->readListBegin($_etype483, $_size480);
- for ($_i484 = 0; $_i484 < $_size480; ++$_i484)
+ $_size487 = 0;
+ $_etype490 = 0;
+ $xfer += $input->readListBegin($_etype490, $_size487);
+ for ($_i491 = 0; $_i491 < $_size487; ++$_i491)
{
- $elem485 = null;
- $xfer += $input->readString($elem485);
- $this->success []= $elem485;
+ $elem492 = null;
+ $xfer += $input->readString($elem492);
+ $this->success []= $elem492;
}
$xfer += $input->readListEnd();
} else {
@@ -17159,9 +17452,9 @@ class ThriftHiveMetastore_get_index_name
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter486)
+ foreach ($this->success as $iter493)
{
- $xfer += $output->writeString($iter486);
+ $xfer += $output->writeString($iter493);
}
}
$output->writeListEnd();
@@ -17623,14 +17916,14 @@ class ThriftHiveMetastore_get_role_names
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size487 = 0;
- $_etype490 = 0;
- $xfer += $input->readListBegin($_etype490, $_size487);
- for ($_i491 = 0; $_i491 < $_size487; ++$_i491)
+ $_size494 = 0;
+ $_etype497 = 0;
+ $xfer += $input->readListBegin($_etype497, $_size494);
+ for ($_i498 = 0; $_i498 < $_size494; ++$_i498)
{
- $elem492 = null;
- $xfer += $input->readString($elem492);
- $this->success []= $elem492;
+ $elem499 = null;
+ $xfer += $input->readString($elem499);
+ $this->success []= $elem499;
}
$xfer += $input->readListEnd();
} else {
@@ -17666,9 +17959,9 @@ class ThriftHiveMetastore_get_role_names
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter493)
+ foreach ($this->success as $iter500)
{
- $xfer += $output->writeString($iter493);
+ $xfer += $output->writeString($iter500);
}
}
$output->writeListEnd();
@@ -18308,15 +18601,15 @@ class ThriftHiveMetastore_list_roles_res
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size494 = 0;
- $_etype497 = 0;
- $xfer += $input->readListBegin($_etype497, $_size494);
- for ($_i498 = 0; $_i498 < $_size494; ++$_i498)
+ $_size501 = 0;
+ $_etype504 = 0;
+ $xfer += $input->readListBegin($_etype504, $_size501);
+ for ($_i505 = 0; $_i505 < $_size501; ++$_i505)
{
- $elem499 = null;
- $elem499 = new Role();
- $xfer += $elem499->read($input);
- $this->success []= $elem499;
+ $elem506 = null;
+ $elem506 = new Role();
+ $xfer += $elem506->read($input);
+ $this->success []= $elem506;
}
$xfer += $input->readListEnd();
} else {
@@ -18352,9 +18645,9 @@ class ThriftHiveMetastore_list_roles_res
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter500)
+ foreach ($this->success as $iter507)
{
- $xfer += $iter500->write($output);
+ $xfer += $iter507->write($output);
}
}
$output->writeListEnd();
@@ -18452,14 +18745,14 @@ class ThriftHiveMetastore_get_privilege_
case 3:
if ($ftype == TType::LST) {
$this->group_names = array();
- $_size501 = 0;
- $_etype504 = 0;
- $xfer += $input->readListBegin($_etype504, $_size501);
- for ($_i505 = 0; $_i505 < $_size501; ++$_i505)
+ $_size508 = 0;
+ $_etype511 = 0;
+ $xfer += $input->readListBegin($_etype511, $_size508);
+ for ($_i512 = 0; $_i512 < $_size508; ++$_i512)
{
- $elem506 = null;
- $xfer += $input->readString($elem506);
- $this->group_names []= $elem506;
+ $elem513 = null;
+ $xfer += $input->readString($elem513);
+ $this->group_names []= $elem513;
}
$xfer += $input->readListEnd();
} else {
@@ -18500,9 +18793,9 @@ class ThriftHiveMetastore_get_privilege_
{
$output->writeListBegin(TType::STRING, count($this->group_names));
{
- foreach ($this->group_names as $iter507)
+ foreach ($this->group_names as $iter514)
{
- $xfer += $output->writeString($iter507);
+ $xfer += $output->writeString($iter514);
}
}
$output->writeListEnd();
@@ -18789,15 +19082,15 @@ class ThriftHiveMetastore_list_privilege
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size508 = 0;
- $_etype511 = 0;
- $xfer += $input->readListBegin($_etype511, $_size508);
- for ($_i512 = 0; $_i512 < $_size508; ++$_i512)
+ $_size515 = 0;
+ $_etype518 = 0;
+ $xfer += $input->readListBegin($_etype518, $_size515);
+ for ($_i519 = 0; $_i519 < $_size515; ++$_i519)
{
- $elem513 = null;
- $elem513 = new HiveObjectPrivilege();
- $xfer += $elem513->read($input);
- $this->success []= $elem513;
+ $elem520 = null;
+ $elem520 = new HiveObjectPrivilege();
+ $xfer += $elem520->read($input);
+ $this->success []= $elem520;
}
$xfer += $input->readListEnd();
} else {
@@ -18833,9 +19126,9 @@ class ThriftHiveMetastore_list_privilege
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter514)
+ foreach ($this->success as $iter521)
{
- $xfer += $iter514->write($output);
+ $xfer += $iter521->write($output);
}
}
$output->writeListEnd();
@@ -19258,14 +19551,14 @@ class ThriftHiveMetastore_set_ugi_args {
case 2:
if ($ftype == TType::LST) {
$this->group_names = array();
- $_size515 = 0;
- $_etype518 = 0;
- $xfer += $input->readListBegin($_etype518, $_size515);
- for ($_i519 = 0; $_i519 < $_size515; ++$_i519)
+ $_size522 = 0;
+ $_etype525 = 0;
+ $xfer += $input->readListBegin($_etype525, $_size522);
+ for ($_i526 = 0; $_i526 < $_size522; ++$_i526)
{
- $elem520 = null;
- $xfer += $input->readString($elem520);
- $this->group_names []= $elem520;
+ $elem527 = null;
+ $xfer += $input->readString($elem527);
+ $this->group_names []= $elem527;
}
$xfer += $input->readListEnd();
} else {
@@ -19298,9 +19591,9 @@ class ThriftHiveMetastore_set_ugi_args {
{
$output->writeListBegin(TType::STRING, count($this->group_names));
{
- foreach ($this->group_names as $iter521)
+ foreach ($this->group_names as $iter528)
{
- $xfer += $output->writeString($iter521);
+ $xfer += $output->writeString($iter528);
}
}
$output->writeListEnd();
@@ -19370,14 +19663,14 @@ class ThriftHiveMetastore_set_ugi_result
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size522 = 0;
- $_etype525 = 0;
- $xfer += $input->readListBegin($_etype525, $_size522);
- for ($_i526 = 0; $_i526 < $_size522; ++$_i526)
+ $_size529 = 0;
+ $_etype532 = 0;
+ $xfer += $input->readListBegin($_etype532, $_size529);
+ for ($_i533 = 0; $_i533 < $_size529; ++$_i533)
{
- $elem527 = null;
- $xfer += $input->readString($elem527);
- $this->success []= $elem527;
+ $elem534 = null;
+ $xfer += $input->readString($elem534);
+ $this->success []= $elem534;
}
$xfer += $input->readListEnd();
} else {
@@ -19413,9 +19706,9 @@ class ThriftHiveMetastore_set_ugi_result
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter528)
+ foreach ($this->success as $iter535)
{
- $xfer += $output->writeString($iter528);
+ $xfer += $output->writeString($iter535);
}
}
$output->writeListEnd();
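
The PHP client above follows Thrift's usual synchronous pattern: send_alter_partitions() writes the CALL message and recv_alter_partitions() blocks for the REPLY, rethrowing o1/o2 as exceptions. The same commit regenerates the Java bindings under gen-javabean (see the file list at the top of this message). As a rough sketch only, assuming the generated ThriftHiveMetastore.Client and a metastore at a placeholder host/port, a raw Java RPC call would look like:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class AlterPartitionsRpcSketch {
      // Sketch only: host/port are placeholders, not values from this commit.
      public static void alterBatch(String host, int port, String db, String tbl,
          List<Partition> newParts) throws Exception {
        TSocket transport = new TSocket(host, port);
        transport.open();
        try {
          ThriftHiveMetastore.Client client =
              new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
          // One CALL/REPLY round trip covers the whole batch.
          client.alter_partitions(db, tbl, newParts);
        } finally {
          transport.close();
        }
      }
    }
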
Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote Sun Sep 16 17:17:16 2012
@@ -62,6 +62,7 @@ if len(sys.argv) <= 1 or sys.argv[1] ==
print ' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
print ' get_partitions_by_names(string db_name, string tbl_name, names)'
print ' void alter_partition(string db_name, string tbl_name, Partition new_part)'
+ print ' void alter_partitions(string db_name, string tbl_name, new_parts)'
print ' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)'
print ' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)'
print ' string get_config_value(string name, string defaultValue)'
@@ -385,6 +386,12 @@ elif cmd == 'alter_partition':
sys.exit(1)
pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),))
+elif cmd == 'alter_partitions':
+ if len(args) != 3:
+ print 'alter_partitions requires 3 args'
+ sys.exit(1)
+ pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),))
+
elif cmd == 'alter_partition_with_environment_context':
if len(args) != 4:
print 'alter_partition_with_environment_context requires 4 args'
Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py Sun Sep 16 17:17:16 2012
@@ -366,6 +366,15 @@ class Iface(fb303.FacebookService.Iface)
"""
pass
+ def alter_partitions(self, db_name, tbl_name, new_parts):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - new_parts
+ """
+ pass
+
def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context):
"""
Parameters:
@@ -2116,6 +2125,42 @@ class Client(fb303.FacebookService.Clien
raise result.o2
return
+ def alter_partitions(self, db_name, tbl_name, new_parts):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - new_parts
+ """
+ self.send_alter_partitions(db_name, tbl_name, new_parts)
+ self.recv_alter_partitions()
+
+ def send_alter_partitions(self, db_name, tbl_name, new_parts):
+ self._oprot.writeMessageBegin('alter_partitions', TMessageType.CALL, self._seqid)
+ args = alter_partitions_args()
+ args.db_name = db_name
+ args.tbl_name = tbl_name
+ args.new_parts = new_parts
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_alter_partitions(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = alter_partitions_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ return
+
def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context):
"""
Parameters:
@@ -3126,6 +3171,7 @@ class Processor(fb303.FacebookService.Pr
self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter
self._processMap["get_partitions_by_names"] = Processor.process_get_partitions_by_names
self._processMap["alter_partition"] = Processor.process_alter_partition
+ self._processMap["alter_partitions"] = Processor.process_alter_partitions
self._processMap["alter_partition_with_environment_context"] = Processor.process_alter_partition_with_environment_context
self._processMap["rename_partition"] = Processor.process_rename_partition
self._processMap["get_config_value"] = Processor.process_get_config_value
@@ -3845,6 +3891,22 @@ class Processor(fb303.FacebookService.Pr
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_alter_partitions(self, seqid, iprot, oprot):
+ args = alter_partitions_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = alter_partitions_result()
+ try:
+ self._handler.alter_partitions(args.db_name, args.tbl_name, args.new_parts)
+ except InvalidOperationException, o1:
+ result.o1 = o1
+ except MetaException, o2:
+ result.o2 = o2
+ oprot.writeMessageBegin("alter_partitions", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_alter_partition_with_environment_context(self, seqid, iprot, oprot):
args = alter_partition_with_environment_context_args()
args.read(iprot)
@@ -11252,6 +11314,173 @@ class alter_partition_result:
def __ne__(self, other):
return not (self == other)
+class alter_partitions_args:
+ """
+ Attributes:
+ - db_name
+ - tbl_name
+ - new_parts
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'db_name', None, None, ), # 1
+ (2, TType.STRING, 'tbl_name', None, None, ), # 2
+ (3, TType.LIST, 'new_parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3
+ )
+
+ def __init__(self, db_name=None, tbl_name=None, new_parts=None,):
+ self.db_name = db_name
+ self.tbl_name = tbl_name
+ self.new_parts = new_parts
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.db_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tbl_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.LIST:
+ self.new_parts = []
+ (_etype435, _size432) = iprot.readListBegin()
+ for _i436 in xrange(_size432):
+ _elem437 = Partition()
+ _elem437.read(iprot)
+ self.new_parts.append(_elem437)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('alter_partitions_args')
+ if self.db_name is not None:
+ oprot.writeFieldBegin('db_name', TType.STRING, 1)
+ oprot.writeString(self.db_name)
+ oprot.writeFieldEnd()
+ if self.tbl_name is not None:
+ oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+ oprot.writeString(self.tbl_name)
+ oprot.writeFieldEnd()
+ if self.new_parts is not None:
+ oprot.writeFieldBegin('new_parts', TType.LIST, 3)
+ oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
+ for iter438 in self.new_parts:
+ iter438.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class alter_partitions_result:
+ """
+ Attributes:
+ - o1
+ - o2
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, o1=None, o2=None,):
+ self.o1 = o1
+ self.o2 = o2
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = InvalidOperationException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = MetaException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('alter_partitions_result')
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 is not None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class alter_partition_with_environment_context_args:
"""
Attributes:
@@ -11469,10 +11698,10 @@ class rename_partition_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype435, _size432) = iprot.readListBegin()
- for _i436 in xrange(_size432):
- _elem437 = iprot.readString();
- self.part_vals.append(_elem437)
+ (_etype442, _size439) = iprot.readListBegin()
+ for _i443 in xrange(_size439):
+ _elem444 = iprot.readString();
+ self.part_vals.append(_elem444)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11503,8 +11732,8 @@ class rename_partition_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter438 in self.part_vals:
- oprot.writeString(iter438)
+ for iter445 in self.part_vals:
+ oprot.writeString(iter445)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.new_part is not None:
@@ -11835,10 +12064,10 @@ class partition_name_to_vals_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype442, _size439) = iprot.readListBegin()
- for _i443 in xrange(_size439):
- _elem444 = iprot.readString();
- self.success.append(_elem444)
+ (_etype449, _size446) = iprot.readListBegin()
+ for _i450 in xrange(_size446):
+ _elem451 = iprot.readString();
+ self.success.append(_elem451)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11861,8 +12090,8 @@ class partition_name_to_vals_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter445 in self.success:
- oprot.writeString(iter445)
+ for iter452 in self.success:
+ oprot.writeString(iter452)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -11975,11 +12204,11 @@ class partition_name_to_spec_result:
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype447, _vtype448, _size446 ) = iprot.readMapBegin()
- for _i450 in xrange(_size446):
- _key451 = iprot.readString();
- _val452 = iprot.readString();
- self.success[_key451] = _val452
+ (_ktype454, _vtype455, _size453 ) = iprot.readMapBegin()
+ for _i457 in xrange(_size453):
+ _key458 = iprot.readString();
+ _val459 = iprot.readString();
+ self.success[_key458] = _val459
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -12002,9 +12231,9 @@ class partition_name_to_spec_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
- for kiter453,viter454 in self.success.items():
- oprot.writeString(kiter453)
- oprot.writeString(viter454)
+ for kiter460,viter461 in self.success.items():
+ oprot.writeString(kiter460)
+ oprot.writeString(viter461)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -12074,11 +12303,11 @@ class markPartitionForEvent_args:
elif fid == 3:
if ftype == TType.MAP:
self.part_vals = {}
- (_ktype456, _vtype457, _size455 ) = iprot.readMapBegin()
- for _i459 in xrange(_size455):
- _key460 = iprot.readString();
- _val461 = iprot.readString();
- self.part_vals[_key460] = _val461
+ (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin()
+ for _i466 in xrange(_size462):
+ _key467 = iprot.readString();
+ _val468 = iprot.readString();
+ self.part_vals[_key467] = _val468
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -12108,9 +12337,9 @@ class markPartitionForEvent_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
- for kiter462,viter463 in self.part_vals.items():
- oprot.writeString(kiter462)
- oprot.writeString(viter463)
+ for kiter469,viter470 in self.part_vals.items():
+ oprot.writeString(kiter469)
+ oprot.writeString(viter470)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.eventType is not None:
@@ -12306,11 +12535,11 @@ class isPartitionMarkedForEvent_args:
elif fid == 3:
if ftype == TType.MAP:
self.part_vals = {}
- (_ktype465, _vtype466, _size464 ) = iprot.readMapBegin()
- for _i468 in xrange(_size464):
- _key469 = iprot.readString();
- _val470 = iprot.readString();
- self.part_vals[_key469] = _val470
+ (_ktype472, _vtype473, _size471 ) = iprot.readMapBegin()
+ for _i475 in xrange(_size471):
+ _key476 = iprot.readString();
+ _val477 = iprot.readString();
+ self.part_vals[_key476] = _val477
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -12340,9 +12569,9 @@ class isPartitionMarkedForEvent_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
- for kiter471,viter472 in self.part_vals.items():
- oprot.writeString(kiter471)
- oprot.writeString(viter472)
+ for kiter478,viter479 in self.part_vals.items():
+ oprot.writeString(kiter478)
+ oprot.writeString(viter479)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.eventType is not None:
@@ -13314,11 +13543,11 @@ class get_indexes_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype476, _size473) = iprot.readListBegin()
- for _i477 in xrange(_size473):
- _elem478 = Index()
- _elem478.read(iprot)
- self.success.append(_elem478)
+ (_etype483, _size480) = iprot.readListBegin()
+ for _i484 in xrange(_size480):
+ _elem485 = Index()
+ _elem485.read(iprot)
+ self.success.append(_elem485)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -13347,8 +13576,8 @@ class get_indexes_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter479 in self.success:
- iter479.write(oprot)
+ for iter486 in self.success:
+ iter486.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -13489,10 +13718,10 @@ class get_index_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype483, _size480) = iprot.readListBegin()
- for _i484 in xrange(_size480):
- _elem485 = iprot.readString();
- self.success.append(_elem485)
+ (_etype490, _size487) = iprot.readListBegin()
+ for _i491 in xrange(_size487):
+ _elem492 = iprot.readString();
+ self.success.append(_elem492)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -13515,8 +13744,8 @@ class get_index_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter486 in self.success:
- oprot.writeString(iter486)
+ for iter493 in self.success:
+ oprot.writeString(iter493)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o2 is not None:
@@ -13876,10 +14105,10 @@ class get_role_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype490, _size487) = iprot.readListBegin()
- for _i491 in xrange(_size487):
- _elem492 = iprot.readString();
- self.success.append(_elem492)
+ (_etype497, _size494) = iprot.readListBegin()
+ for _i498 in xrange(_size494):
+ _elem499 = iprot.readString();
+ self.success.append(_elem499)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -13902,8 +14131,8 @@ class get_role_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter493 in self.success:
- oprot.writeString(iter493)
+ for iter500 in self.success:
+ oprot.writeString(iter500)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -14376,11 +14605,11 @@ class list_roles_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype497, _size494) = iprot.readListBegin()
- for _i498 in xrange(_size494):
- _elem499 = Role()
- _elem499.read(iprot)
- self.success.append(_elem499)
+ (_etype504, _size501) = iprot.readListBegin()
+ for _i505 in xrange(_size501):
+ _elem506 = Role()
+ _elem506.read(iprot)
+ self.success.append(_elem506)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -14403,8 +14632,8 @@ class list_roles_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter500 in self.success:
- iter500.write(oprot)
+ for iter507 in self.success:
+ iter507.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -14472,10 +14701,10 @@ class get_privilege_set_args:
elif fid == 3:
if ftype == TType.LIST:
self.group_names = []
- (_etype504, _size501) = iprot.readListBegin()
- for _i505 in xrange(_size501):
- _elem506 = iprot.readString();
- self.group_names.append(_elem506)
+ (_etype511, _size508) = iprot.readListBegin()
+ for _i512 in xrange(_size508):
+ _elem513 = iprot.readString();
+ self.group_names.append(_elem513)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -14500,8 +14729,8 @@ class get_privilege_set_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter507 in self.group_names:
- oprot.writeString(iter507)
+ for iter514 in self.group_names:
+ oprot.writeString(iter514)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -14708,11 +14937,11 @@ class list_privileges_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype511, _size508) = iprot.readListBegin()
- for _i512 in xrange(_size508):
- _elem513 = HiveObjectPrivilege()
- _elem513.read(iprot)
- self.success.append(_elem513)
+ (_etype518, _size515) = iprot.readListBegin()
+ for _i519 in xrange(_size515):
+ _elem520 = HiveObjectPrivilege()
+ _elem520.read(iprot)
+ self.success.append(_elem520)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -14735,8 +14964,8 @@ class list_privileges_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter514 in self.success:
- iter514.write(oprot)
+ for iter521 in self.success:
+ iter521.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -15061,10 +15290,10 @@ class set_ugi_args:
elif fid == 2:
if ftype == TType.LIST:
self.group_names = []
- (_etype518, _size515) = iprot.readListBegin()
- for _i519 in xrange(_size515):
- _elem520 = iprot.readString();
- self.group_names.append(_elem520)
+ (_etype525, _size522) = iprot.readListBegin()
+ for _i526 in xrange(_size522):
+ _elem527 = iprot.readString();
+ self.group_names.append(_elem527)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -15085,8 +15314,8 @@ class set_ugi_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter521 in self.group_names:
- oprot.writeString(iter521)
+ for iter528 in self.group_names:
+ oprot.writeString(iter528)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -15135,10 +15364,10 @@ class set_ugi_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype525, _size522) = iprot.readListBegin()
- for _i526 in xrange(_size522):
- _elem527 = iprot.readString();
- self.success.append(_elem527)
+ (_etype532, _size529) = iprot.readListBegin()
+ for _i533 in xrange(_size529):
+ _elem534 = iprot.readString();
+ self.success.append(_elem534)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -15161,8 +15390,8 @@ class set_ugi_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter528 in self.success:
- oprot.writeString(iter528)
+ for iter535 in self.success:
+ oprot.writeString(iter535)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Sun Sep 16 17:17:16 2012
@@ -710,6 +710,22 @@ module ThriftHiveMetastore
return
end
+ def alter_partitions(db_name, tbl_name, new_parts)
+ send_alter_partitions(db_name, tbl_name, new_parts)
+ recv_alter_partitions()
+ end
+
+ def send_alter_partitions(db_name, tbl_name, new_parts)
+ send_message('alter_partitions', Alter_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts)
+ end
+
+ def recv_alter_partitions()
+ result = receive_message(Alter_partitions_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ return
+ end
+
def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
recv_alter_partition_with_environment_context()
@@ -1713,6 +1729,19 @@ module ThriftHiveMetastore
write_result(result, oprot, 'alter_partition', seqid)
end
+ def process_alter_partitions(seqid, iprot, oprot)
+ args = read_args(iprot, Alter_partitions_args)
+ result = Alter_partitions_result.new()
+ begin
+ @handler.alter_partitions(args.db_name, args.tbl_name, args.new_parts)
+ rescue InvalidOperationException => o1
+ result.o1 = o1
+ rescue MetaException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'alter_partitions', seqid)
+ end
+
def process_alter_partition_with_environment_context(seqid, iprot, oprot)
args = read_args(iprot, Alter_partition_with_environment_context_args)
result = Alter_partition_with_environment_context_result.new()
@@ -3653,6 +3682,44 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Alter_partitions_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DB_NAME = 1
+ TBL_NAME = 2
+ NEW_PARTS = 3
+
+ FIELDS = {
+ DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+ TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+ NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => Partition}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Alter_partitions_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => InvalidOperationException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Alter_partition_with_environment_context_args
include ::Thrift::Struct, ::Thrift::Struct_Union
DB_NAME = 1
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java Sun Sep 16 17:17:16 2012
@@ -79,4 +79,27 @@ public interface AlterHandler extends Co
final String name, final List<String> part_vals, final Partition new_part)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
MetaException;
+
+ /**
+ * handles alter partitions
+ *
+ * @param msdb
+ * object to get metadata
+ * @param wh
+ * @param dbname
+ * database of the partition being altered
+ * @param name
+ * table of the partition being altered
+ * @param new_parts
+ * new partition list
+ * @return the altered partition list
+ * @throws InvalidOperationException
+ * @throws InvalidObjectException
+ * @throws AlreadyExistsException
+ * @throws MetaException
+ */
+ public abstract List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+ final String dbname, final String name, final List<Partition> new_part)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
+ MetaException;
}
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java Sun Sep 16 17:17:16 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore
import java.io.IOException;
import java.net.URI;
+import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -368,6 +369,35 @@ public class HiveAlterHandler implements
return oldPart;
}
+ public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List<Partition> new_parts)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
+ MetaException {
+ List<Partition> oldParts = new ArrayList<Partition>();
+ List<List<String>> partValsList = new ArrayList<List<String>>();
+ try {
+ for (Partition tmpPart: new_parts) {
+ // Set DDL time to now if not specified
+ if (tmpPart.getParameters() == null ||
+ tmpPart.getParameters().get(Constants.DDL_TIME) == null ||
+ Integer.parseInt(tmpPart.getParameters().get(Constants.DDL_TIME)) == 0) {
+ tmpPart.putToParameters(Constants.DDL_TIME, Long.toString(System
+ .currentTimeMillis() / 1000));
+ }
+ Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues());
+ oldParts.add(oldTmpPart);
+ partValsList.add(tmpPart.getValues());
+ }
+ msdb.alterPartitions(dbname, name, partValsList, new_parts);
+ } catch (InvalidObjectException e) {
+ throw new InvalidOperationException("alter is not possible");
+ } catch (NoSuchObjectException e){
+ //old partition does not exist
+ throw new InvalidOperationException("alter is not possible");
+ }
+ return oldParts;
+ }
+
private boolean checkPartialPartKeysEqual(List<FieldSchema> oldPartKeys,
List<FieldSchema> newPartKeys) {
//return true if both are null, or false if one is null and the other isn't
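
HiveAlterHandler.alterPartitions above stamps Constants.DDL_TIME on each incoming partition when the parameter is absent or zero, collects the corresponding old partitions, then hands the whole batch to RawStore.alterPartitions in one call. A minimal standalone sketch of just that defaulting rule, assuming Constants.DDL_TIME is the usual "transient_lastDdlTime" key:

    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.Partition;

    class DdlTimeSketch {
      // Mirrors the check in alterPartitions; the literal key is an assumption
      // standing in for Constants.DDL_TIME.
      static void stampDdlTimeIfMissing(Partition part, long nowSeconds) {
        Map<String, String> params = part.getParameters();
        if (params == null || params.get("transient_lastDdlTime") == null
            || Integer.parseInt(params.get("transient_lastDdlTime")) == 0) {
          part.putToParameters("transient_lastDdlTime", Long.toString(nowSeconds));
        }
      }
    }
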
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Sun Sep 16 17:17:16 2012
@@ -31,6 +31,7 @@ import java.util.Collections;
import java.util.Formatter;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -1823,6 +1824,60 @@ public class HiveMetaStore extends Thrif
return;
}
+ @Override
+ public void alter_partitions(final String db_name, final String tbl_name,
+ final List<Partition> new_parts)
+ throws InvalidOperationException, MetaException,
+ TException {
+
+ startTableFunction("alter_partitions", db_name, tbl_name);
+
+ if (LOG.isInfoEnabled()) {
+ for (Partition tmpPart: new_parts) {
+ LOG.info("New partition values:" + tmpPart.getValues());
+ }
+ }
+ // all partitions are altered atomically
+ // all prehooks are fired together followed by all post hooks
+ List<Partition> oldParts = null;
+ try {
+ for (Partition tmpPart: new_parts) {
+ try {
+ for (MetaStorePreEventListener listener : preListeners) {
+ listener.onEvent(
+ new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
+ }
+ } catch (NoSuchObjectException e) {
+ throw new MetaException(e.getMessage());
+ }
+ }
+ oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts);
+
+ Iterator<Partition> olditr = oldParts.iterator();
+ for (Partition tmpPart: new_parts) {
+ Partition oldTmpPart = null;
+ if (olditr.hasNext()) {
+ oldTmpPart = (Partition)olditr.next();
+ }
+ else {
+ throw new InvalidOperationException("failed to alterpartitions");
+ }
+ for (MetaStoreEventListener listener : listeners) {
+ AlterPartitionEvent alterPartitionEvent =
+ new AlterPartitionEvent(oldTmpPart, tmpPart, true, this);
+ listener.onAlterPartition(alterPartitionEvent);
+ }
+ }
+ } catch (InvalidObjectException e) {
+ throw new InvalidOperationException(e.getMessage());
+ } catch (AlreadyExistsException e) {
+ throw new InvalidOperationException(e.getMessage());
+ } finally {
+ endFunction("alter_partition", oldParts != null);
+ }
+ return;
+ }
+
public boolean create_index(Index index_def)
throws IndexAlreadyExistsException, MetaException {
endFunction(startFunction("create_index"), false);
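
The server-side alter_partitions fires every PreAlterPartitionEvent before anything is written and, once the handler returns, one AlterPartitionEvent per partition, pairing each old partition with its replacement via the iterator over oldParts. A hedged sketch of what a post-event consumer sees; in a real deployment this body would live in a MetaStoreEventListener's onAlterPartition, and the accessor names assume the event exposes the old/new pair passed to its constructor above:

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;

    class AlterEventSketch {
      // Illustrative consumer of the per-partition post events fired above.
      static void logAlter(AlterPartitionEvent event) {
        Partition oldPart = event.getOldPartition();
        Partition newPart = event.getNewPartition();
        System.out.println("altered " + oldPart.getValues()
            + " -> " + newPart.getValues());
      }
    }
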
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Sun Sep 16 17:17:16 2012
@@ -834,6 +834,11 @@ public class HiveMetaStoreClient impleme
client.alter_partition(dbName, tblName, newPart);
}
+ public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+ throws InvalidOperationException, MetaException, TException {
+ client.alter_partitions(dbName, tblName, newParts);
+}
+
public void alterDatabase(String dbName, Database db)
throws MetaException, NoSuchObjectException, TException {
client.alter_database(dbName, db);
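
End to end, the new client method turns N alter_partition round trips into one. A minimal usage sketch, assuming a reachable metastore configured via HiveConf and this era's listPartitions(db, tbl, max_parts) signature; the table name and parameter edit are illustrative:

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class BatchAlterSketch {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // -1 means "no limit" for max_parts in this API.
        List<Partition> parts =
            client.listPartitions("default", "page_view", (short) -1);
        for (Partition part : parts) {
          part.putToParameters("comment", "bulk-updated"); // illustrative edit
        }
        // Single RPC; the server applies the batch atomically.
        client.alter_partitions("default", "page_view", parts);
        client.close();
      }
    }
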
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Sun Sep 16 17:17:16 2012
@@ -554,6 +554,25 @@ public interface IMetaStoreClient {
throws InvalidOperationException, MetaException, TException;
/**
+ * updates a list of partitions
+ *
+ * @param dbName
+ * database of the old partition
+ * @param tblName
+ * table name of the old partition
+ * @param newParts
+ * list of partitions
+ * @throws InvalidOperationException
+ * if the old partition does not exist
+ * @throws MetaException
+ * if error in updating metadata
+ * @throws TException
+ * if error in communicating with metastore server
+ */
+ public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+ throws InvalidOperationException, MetaException, TException;
+
+ /**
* rename a partition to a new partition
*
* @param dbname
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Sun Sep 16 17:17:16 2012
@@ -1975,27 +1975,53 @@ public class ObjectStore implements RawS
}
}
+ private void alterPartitionNoTxn(String dbname, String name, List<String> part_vals,
+ Partition newPart) throws InvalidObjectException, MetaException {
+ name = name.toLowerCase();
+ dbname = dbname.toLowerCase();
+ MPartition oldp = getMPartition(dbname, name, part_vals);
+ MPartition newp = convertToMPart(newPart, false);
+ if (oldp == null || newp == null) {
+ throw new InvalidObjectException("partition does not exist.");
+ }
+ oldp.setValues(newp.getValues());
+ oldp.setPartitionName(newp.getPartitionName());
+ oldp.setParameters(newPart.getParameters());
+ copyMSD(newp.getSd(), oldp.getSd());
+ if (newp.getCreateTime() != oldp.getCreateTime()) {
+ oldp.setCreateTime(newp.getCreateTime());
+ }
+ if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
+ oldp.setLastAccessTime(newp.getLastAccessTime());
+ }
+ }
+
public void alterPartition(String dbname, String name, List<String> part_vals, Partition newPart)
throws InvalidObjectException, MetaException {
boolean success = false;
try {
openTransaction();
- name = name.toLowerCase();
- dbname = dbname.toLowerCase();
- MPartition oldp = getMPartition(dbname, name, part_vals);
- MPartition newp = convertToMPart(newPart, false);
- if (oldp == null || newp == null) {
- throw new InvalidObjectException("partition does not exist.");
- }
- oldp.setValues(newp.getValues());
- oldp.setPartitionName(newp.getPartitionName());
- oldp.setParameters(newPart.getParameters());
- copyMSD(newp.getSd(), oldp.getSd());
- if (newp.getCreateTime() != oldp.getCreateTime()) {
- oldp.setCreateTime(newp.getCreateTime());
+ alterPartitionNoTxn(dbname, name, part_vals, newPart);
+ // commit the changes
+ success = commitTransaction();
+ } finally {
+ if (!success) {
+ rollbackTransaction();
+ throw new MetaException(
+ "The transaction for alter partition did not commit successfully.");
}
- if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
- oldp.setLastAccessTime(newp.getLastAccessTime());
+ }
+ }
+
+ public void alterPartitions(String dbname, String name, List<List<String>> part_vals,
+ List<Partition> newParts) throws InvalidObjectException, MetaException {
+ boolean success = false;
+ try {
+ openTransaction();
+ Iterator<List<String>> part_val_itr = part_vals.iterator();
+ for (Partition tmpPart: newParts) {
+ List<String> tmpPartVals = part_val_itr.next();
+ alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart);
}
// commit the changes
success = commitTransaction();
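A note on the contract, as read from the change above (not text from the commit): alterPartitions() expects two parallel lists — part_vals.get(i) identifies the existing partition that newParts.get(i) replaces — and performs every update inside one transaction, so a failed commit rolls the whole batch back. A hypothetical caller building those lists might look like this sketch (class and method names are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;

public class BatchAlterSketch {
  // Hypothetical helper: derive the parallel key list from the updated
  // partitions themselves, assuming the partition key values are unchanged.
  static void batchAlter(RawStore store, String dbName, String tblName,
      List<Partition> updatedParts) throws InvalidObjectException, MetaException {
    List<List<String>> partValsList = new ArrayList<List<String>>();
    for (Partition p : updatedParts) {
      partValsList.add(p.getValues()); // key values identifying the existing partition
    }
    store.alterPartitions(dbName, tblName, partValsList, updatedParts);
  }
}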
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Sun Sep 16 17:17:16 2012
@@ -159,6 +159,10 @@ public interface RawStore extends Config
public abstract void alterPartition(String db_name, String tbl_name, List<String> part_vals,
Partition new_part) throws InvalidObjectException, MetaException;
+ public abstract void alterPartitions(String db_name, String tbl_name,
+ List<List<String>> part_vals_list, List<Partition> new_parts)
+ throws InvalidObjectException, MetaException;
+
public abstract boolean addIndex(Index index)
throws InvalidObjectException, MetaException;
Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Sun Sep 16 17:17:16 2012
@@ -257,6 +257,14 @@ public class DummyRawStoreForJdoConnecti
}
@Override
+ public void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
+ List<Partition> new_parts) throws InvalidObjectException, MetaException {
+ // intentionally a no-op in this dummy implementation
+ }
+
+ @Override
public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
return false;
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Sun Sep 16 17:17:16 2012
@@ -2663,6 +2663,17 @@ public class DDLTask extends Task<DDLWor
outStream.write(separator);
}
+ private void setAlterProtectMode(boolean protectModeEnable,
+ AlterTableDesc.ProtectModeType protectMode,
+ ProtectMode mode) {
+ if (protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
+ mode.offline = protectModeEnable;
+ } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
+ mode.noDrop = protectModeEnable;
+ } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) {
+ mode.noDropCascade = protectModeEnable;
+ }
+ }
/**
* Alter a given table.
*
@@ -2679,14 +2690,20 @@ public class DDLTask extends Task<DDLWor
Table tbl = db.getTable(alterTbl.getOldName());
Partition part = null;
- if(alterTbl.getPartSpec() != null) {
- part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
- if(part == null) {
- formatter.consoleError(console,
- "Partition : " + alterTbl.getPartSpec().toString()
- + " does not exist.",
- formatter.MISSING);
- return 1;
+ List<Partition> allPartitions = null;
+ if (alterTbl.getPartSpec() != null) {
+ if (alterTbl.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
+ part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
+ if (part == null) {
+ formatter.consoleError(console,
+ "Partition : " + alterTbl.getPartSpec().toString()
+ + " does not exist.",
+ formatter.MISSING);
+ return 1;
+ }
+ }
+ else {
+ allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec());
}
}
@@ -2863,38 +2880,17 @@ public class DDLTask extends Task<DDLWor
AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();
ProtectMode mode = null;
- if(part != null) {
- mode = part.getProtectMode();
+ if (allPartitions != null) {
+ for (Partition tmpPart: allPartitions) {
+ mode = tmpPart.getProtectMode();
+ setAlterProtectMode(protectModeEnable, protectMode, mode);
+ tmpPart.setProtectMode(mode);
+ }
} else {
mode = tbl.getProtectMode();
- }
-
- if (protectModeEnable
- && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
- mode.offline = true;
- } else if (protectModeEnable
- && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
- mode.noDrop = true;
- } else if (protectModeEnable
- && protectMode == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) {
- mode.noDropCascade = true;
- } else if (!protectModeEnable
- && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
- mode.offline = false;
- } else if (!protectModeEnable
- && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
- mode.noDrop = false;
- } else if (!protectModeEnable
- && protectMode == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) {
- mode.noDropCascade = false;
- }
-
- if (part != null) {
- part.setProtectMode(mode);
- } else {
setAlterProtectMode(protectModeEnable, protectMode, mode);
tbl.setProtectMode(mode);
}
-
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
// validate sort columns and bucket columns
List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
@@ -2948,7 +2944,7 @@ public class DDLTask extends Task<DDLWor
return 1;
}
- if(part == null) {
+ if (part == null && allPartitions == null) {
if (!updateModifiedParameters(tbl.getTTable().getParameters(), conf)) {
return 1;
}
@@ -2960,18 +2956,28 @@ public class DDLTask extends Task<DDLWor
formatter.ERROR);
return 1;
}
- } else {
+ } else if (part != null) {
if (!updateModifiedParameters(part.getParameters(), conf)) {
return 1;
}
}
+ else {
+ for (Partition tmpPart: allPartitions) {
+ if (!updateModifiedParameters(tmpPart.getParameters(), conf)) {
+ return 1;
+ }
+ }
+ }
try {
- if (part == null) {
+ if (part == null && allPartitions == null) {
db.alterTable(alterTbl.getOldName(), tbl);
- } else {
+ } else if (part != null) {
db.alterPartition(tbl.getTableName(), part);
}
+ else {
+ db.alterPartitions(tbl.getTableName(), allPartitions);
+ }
} catch (InvalidOperationException e) {
console.printError("Invalid alter operation: " + e.getMessage());
LOG.info("alter table: " + stringifyException(e));
@@ -2988,7 +2994,14 @@ public class DDLTask extends Task<DDLWor
if(part != null) {
work.getInputs().add(new ReadEntity(part));
work.getOutputs().add(new WriteEntity(part));
- } else {
+ }
+ else if (allPartitions != null) {
+ for (Partition tmpPart: allPartitions) {
+ work.getInputs().add(new ReadEntity(tmpPart));
+ work.getOutputs().add(new WriteEntity(tmpPart));
+ }
+ }
+ else {
work.getInputs().add(new ReadEntity(oldTbl));
work.getOutputs().add(new WriteEntity(tbl));
}
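Restated outside DDLTask for clarity (illustration only; the class and method names below are hypothetical): the new setAlterProtectMode() helper toggles exactly one boolean on the ProtectMode object per ProtectModeType, replacing the six-way if/else chain removed above.

import org.apache.hadoop.hive.ql.metadata.ProtectMode;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;

public class ProtectModeSketch {
  // Same logic as the new private setAlterProtectMode() helper in DDLTask.
  static void apply(boolean enable, AlterTableDesc.ProtectModeType type, ProtectMode mode) {
    if (type == AlterTableDesc.ProtectModeType.OFFLINE) {
      mode.offline = enable;
    } else if (type == AlterTableDesc.ProtectModeType.NO_DROP) {
      mode.noDrop = enable;
    } else if (type == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) {
      mode.noDropCascade = enable;
    }
  }

  public static void main(String[] args) {
    ProtectMode m = new ProtectMode();
    apply(true, AlterTableDesc.ProtectModeType.NO_DROP, m);
    System.out.println(m.noDrop);  // true: the partition can no longer be dropped
    apply(false, AlterTableDesc.ProtectModeType.NO_DROP, m);
    System.out.println(m.noDrop);  // false: drop is allowed again
  }
}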
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Sun Sep 16 17:17:16 2012
@@ -433,6 +433,37 @@ public class Hive {
}
/**
+ * Updates the metadata of the given partitions with the new metadata.
+ *
+ * @param tblName
+ * name of the existing table that the partitions belong to
+ * @param newParts
+ * partitions carrying the new metadata
+ * @throws InvalidOperationException
+ * if the changes in metadata are not acceptable
+ * @throws HiveException
+ */
+ public void alterPartitions(String tblName, List<Partition> newParts)
+ throws InvalidOperationException, HiveException {
+ Table t = newTable(tblName);
+ List<org.apache.hadoop.hive.metastore.api.Partition> newTParts =
+ new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
+ try {
+ // Remove the DDL time so that it gets refreshed
+ for (Partition tmpPart: newParts) {
+ if (tmpPart.getParameters() != null) {
+ tmpPart.getParameters().remove(Constants.DDL_TIME);
+ }
+ newTParts.add(tmpPart.getTPartition());
+ }
+ getMSC().alter_partitions(t.getDbName(), t.getTableName(), newTParts);
+ } catch (MetaException e) {
+ throw new HiveException("Unable to alter partition.", e);
+ } catch (TException e) {
+ throw new HiveException("Unable to alter partition.", e);
+ }
+ }
+ /**
* Rename an old partition to a new partition
*
* @param tbl
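Again for illustration only (the table name and parameter key are hypothetical): a caller of the new Hive.alterPartitions() can fetch the partitions, modify them in memory, and persist the whole batch with a single metastore call.

import java.util.List;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class HiveAlterPartitionsSketch {
  static void markAllPartitions(Hive db, String tableName)
      throws HiveException, InvalidOperationException {
    Table tbl = db.getTable(tableName);
    List<Partition> parts = db.getPartitions(tbl); // every partition of the table
    for (Partition p : parts) {
      p.getParameters().put("reviewed", "true");   // hypothetical parameter
    }
    // One round trip; alterPartitions() also strips DDL_TIME so it is refreshed.
    db.alterPartitions(tbl.getTableName(), parts);
  }
}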
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1385320&r1=1385319&r2=1385320&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Sun Sep 16 17:17:16 2012
@@ -1088,19 +1088,39 @@ public class DDLSemanticAnalyzer extends
Table tab = null;
try {
tab = db.getTable(db.getCurrentDatabase(), tableName, true);
- inputs.add(new ReadEntity(tab));
+ } catch (HiveException e) {
+ throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+ }
- if ((partSpec == null) || (partSpec.isEmpty())) {
- outputs.add(new WriteEntity(tab));
+ inputs.add(new ReadEntity(tab));
+
+ if ((partSpec == null) || (partSpec.isEmpty())) {
+ outputs.add(new WriteEntity(tab));
+ }
+ else {
+ List<Partition> allPartitions = null;
+ try {
+ if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE){
+ Partition part = db.getPartition(tab, partSpec, false);
+ allPartitions = new ArrayList<Partition>(1);
+ allPartitions.add(part);
+ }
+ else {
+ allPartitions = db.getPartitions(tab, partSpec);
+ if (allPartitions == null || allPartitions.size() == 0) {
+ throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.toString()));
+ }
+ }
}
- else {
- Partition part = db.getPartition(tab, partSpec, false);
- if (part != null) {
+ catch (HiveException e) {
+ throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.toString()), e);
+ }
+
+ if (allPartitions != null) {
+ for (Partition part: allPartitions) {
outputs.add(new WriteEntity(part));
}
}
- } catch (HiveException e) {
- throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
}
if (desc != null) {
Added: hive/trunk/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q?rev=1385320&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q Sun Sep 16 17:17:16 2012
@@ -0,0 +1,8 @@
+-- Create table
+create table if not exists alter_part_invalidspec(key string, value string) partitioned by (year string, month string) stored as textfile;
+
+-- Load data
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='10');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='12');
+
+alter table alter_part_invalidspec partition (year='1997') enable no_drop;
Added: hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop.q?rev=1385320&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop.q Sun Sep 16 17:17:16 2012
@@ -0,0 +1,9 @@
+-- Create table
+create table if not exists alter_part_nodrop_part(key string, value string) partitioned by (year string, month string) stored as textfile;
+
+-- Load data
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='10');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='12');
+
+alter table alter_part_nodrop_part partition (year='1996') enable no_drop;
+alter table alter_part_nodrop_part drop partition (year='1996');
Added: hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q?rev=1385320&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q Sun Sep 16 17:17:16 2012
@@ -0,0 +1,9 @@
+-- Create table
+create table if not exists alter_part_nodrop_table(key string, value string) partitioned by (year string, month string) stored as textfile;
+
+-- Load data
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='10');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='12');
+
+alter table alter_part_nodrop_table partition (year='1996') enable no_drop;
+drop table alter_part_nodrop_table;
Added: hive/trunk/ql/src/test/queries/clientnegative/alter_partition_offline.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/alter_partition_offline.q?rev=1385320&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/alter_partition_offline.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/alter_partition_offline.q Sun Sep 16 17:17:16 2012
@@ -0,0 +1,11 @@
+-- Create table
+create table if not exists alter_part_offline (key string, value string) partitioned by (year string, month string) stored as textfile;
+
+-- Load data
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='10');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='12');
+
+alter table alter_part_offline partition (year='1996') disable offline;
+select * from alter_part_offline where year = '1996';
+alter table alter_part_offline partition (year='1996') enable offline;
+select * from alter_part_offline where year = '1996';
Added: hive/trunk/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q?rev=1385320&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q Sun Sep 16 17:17:16 2012
@@ -0,0 +1,26 @@
+-- Create table
+create table if not exists alter_part_protect_mode(key string, value string) partitioned by (year string, month string) stored as textfile;
+
+-- Load data
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='10');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='12');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1995', month='09');
+load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1994', month='07');
+
+-- offline
+alter table alter_part_protect_mode partition (year='1996') disable offline;
+select * from alter_part_protect_mode where year = '1996';
+alter table alter_part_protect_mode partition (year='1995') enable offline;
+alter table alter_part_protect_mode partition (year='1995') disable offline;
+select * from alter_part_protect_mode where year = '1995';
+
+-- no_drop
+alter table alter_part_protect_mode partition (year='1996') enable no_drop;
+alter table alter_part_protect_mode partition (year='1995') disable no_drop;
+alter table alter_part_protect_mode drop partition (year='1995');
+alter table alter_part_protect_mode partition (year='1994', month='07') disable no_drop;
+alter table alter_part_protect_mode drop partition (year='1994');
+
+-- Cleanup
+alter table alter_part_protect_mode partition (year='1996') disable no_drop;
+drop table alter_part_protect_mode;