You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by se...@apache.org on 2017/11/27 22:27:55 UTC
[01/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management APIs (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Repository: hive
Updated Branches:
refs/heads/master be1f84733 -> 44ef59915
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index da88bf5..6121c9b 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -27,6 +27,7 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
@@ -67,6 +68,8 @@ import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
@@ -1007,4 +1010,41 @@ public class DummyRawStoreForJdoConnection implements RawStore {
throws NoSuchObjectException, MetaException {
return null;
}
+
+ @Override
+ public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ }
+
+ @Override
+ public void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException {
+ }
+
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ }
+
+ @Override
+ public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ }
+
+ @Override
+ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+ }
}
[05/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management APIs (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index e70b41b..b5b18f2 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -1329,6 +1329,58 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
* @throws \metastore\MetaException
*/
public function get_triggers_for_resourceplan(\metastore\WMGetTriggersForResourePlanRequest $request);
+ /**
+ * @param \metastore\WMCreatePoolRequest $request
+ * @return \metastore\WMCreatePoolResponse
+ * @throws \metastore\AlreadyExistsException
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\InvalidObjectException
+ * @throws \metastore\MetaException
+ */
+ public function create_wm_pool(\metastore\WMCreatePoolRequest $request);
+ /**
+ * @param \metastore\WMAlterPoolRequest $request
+ * @return \metastore\WMAlterPoolResponse
+ * @throws \metastore\AlreadyExistsException
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\InvalidObjectException
+ * @throws \metastore\MetaException
+ */
+ public function alter_wm_pool(\metastore\WMAlterPoolRequest $request);
+ /**
+ * @param \metastore\WMDropPoolRequest $request
+ * @return \metastore\WMDropPoolResponse
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\InvalidOperationException
+ * @throws \metastore\MetaException
+ */
+ public function drop_wm_pool(\metastore\WMDropPoolRequest $request);
+ /**
+ * @param \metastore\WMCreateOrUpdateMappingRequest $request
+ * @return \metastore\WMCreateOrUpdateMappingResponse
+ * @throws \metastore\AlreadyExistsException
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\InvalidObjectException
+ * @throws \metastore\MetaException
+ */
+ public function create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request);
+ /**
+ * @param \metastore\WMDropMappingRequest $request
+ * @return \metastore\WMDropMappingResponse
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\InvalidOperationException
+ * @throws \metastore\MetaException
+ */
+ public function drop_wm_mapping(\metastore\WMDropMappingRequest $request);
+ /**
+ * @param \metastore\WMCreateOrDropTriggerToPoolMappingRequest $request
+ * @return \metastore\WMCreateOrDropTriggerToPoolMappingResponse
+ * @throws \metastore\AlreadyExistsException
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\InvalidObjectException
+ * @throws \metastore\MetaException
+ */
+ public function create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request);
}
class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
@@ -11159,196 +11211,389 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
throw new \Exception("get_triggers_for_resourceplan failed: unknown result");
}
-}
+ public function create_wm_pool(\metastore\WMCreatePoolRequest $request)
+ {
+ $this->send_create_wm_pool($request);
+ return $this->recv_create_wm_pool();
+ }
-// HELPER FUNCTIONS AND STRUCTURES
+ public function send_create_wm_pool(\metastore\WMCreatePoolRequest $request)
+ {
+ $args = new \metastore\ThriftHiveMetastore_create_wm_pool_args();
+ $args->request = $request;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'create_wm_pool', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('create_wm_pool', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
-class ThriftHiveMetastore_getMetaConf_args {
- static $_TSPEC;
+ public function recv_create_wm_pool()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_wm_pool_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
- /**
- * @var string
- */
- public $key = null;
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new \metastore\ThriftHiveMetastore_create_wm_pool_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ if ($result->o4 !== null) {
+ throw $result->o4;
+ }
+ throw new \Exception("create_wm_pool failed: unknown result");
+ }
- public function __construct($vals=null) {
- if (!isset(self::$_TSPEC)) {
- self::$_TSPEC = array(
- 1 => array(
- 'var' => 'key',
- 'type' => TType::STRING,
- ),
- );
+ public function alter_wm_pool(\metastore\WMAlterPoolRequest $request)
+ {
+ $this->send_alter_wm_pool($request);
+ return $this->recv_alter_wm_pool();
+ }
+
+ public function send_alter_wm_pool(\metastore\WMAlterPoolRequest $request)
+ {
+ $args = new \metastore\ThriftHiveMetastore_alter_wm_pool_args();
+ $args->request = $request;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'alter_wm_pool', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
}
- if (is_array($vals)) {
- if (isset($vals['key'])) {
- $this->key = $vals['key'];
+ else
+ {
+ $this->output_->writeMessageBegin('alter_wm_pool', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_alter_wm_pool()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_wm_pool_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
}
+ $result = new \metastore\ThriftHiveMetastore_alter_wm_pool_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
}
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ if ($result->o4 !== null) {
+ throw $result->o4;
+ }
+ throw new \Exception("alter_wm_pool failed: unknown result");
}
- public function getName() {
- return 'ThriftHiveMetastore_getMetaConf_args';
+ public function drop_wm_pool(\metastore\WMDropPoolRequest $request)
+ {
+ $this->send_drop_wm_pool($request);
+ return $this->recv_drop_wm_pool();
}
- public function read($input)
+ public function send_drop_wm_pool(\metastore\WMDropPoolRequest $request)
{
- $xfer = 0;
- $fname = null;
- $ftype = 0;
- $fid = 0;
- $xfer += $input->readStructBegin($fname);
- while (true)
+ $args = new \metastore\ThriftHiveMetastore_drop_wm_pool_args();
+ $args->request = $request;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
{
- $xfer += $input->readFieldBegin($fname, $ftype, $fid);
- if ($ftype == TType::STOP) {
- break;
- }
- switch ($fid)
- {
- case 1:
- if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->key);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- default:
- $xfer += $input->skip($ftype);
- break;
+ thrift_protocol_write_binary($this->output_, 'drop_wm_pool', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('drop_wm_pool', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_drop_wm_pool()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_wm_pool_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
}
- $xfer += $input->readFieldEnd();
+ $result = new \metastore\ThriftHiveMetastore_drop_wm_pool_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
}
- $xfer += $input->readStructEnd();
- return $xfer;
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ throw new \Exception("drop_wm_pool failed: unknown result");
}
- public function write($output) {
- $xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args');
- if ($this->key !== null) {
- $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
- $xfer += $output->writeString($this->key);
- $xfer += $output->writeFieldEnd();
+ public function create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request)
+ {
+ $this->send_create_or_update_wm_mapping($request);
+ return $this->recv_create_or_update_wm_mapping();
+ }
+
+ public function send_create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request)
+ {
+ $args = new \metastore\ThriftHiveMetastore_create_or_update_wm_mapping_args();
+ $args->request = $request;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'create_or_update_wm_mapping', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('create_or_update_wm_mapping', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
}
- $xfer += $output->writeFieldStop();
- $xfer += $output->writeStructEnd();
- return $xfer;
}
-}
+ public function recv_create_or_update_wm_mapping()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_or_update_wm_mapping_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
-class ThriftHiveMetastore_getMetaConf_result {
- static $_TSPEC;
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new \metastore\ThriftHiveMetastore_create_or_update_wm_mapping_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ if ($result->o4 !== null) {
+ throw $result->o4;
+ }
+ throw new \Exception("create_or_update_wm_mapping failed: unknown result");
+ }
- /**
- * @var string
- */
- public $success = null;
- /**
- * @var \metastore\MetaException
- */
- public $o1 = null;
+ public function drop_wm_mapping(\metastore\WMDropMappingRequest $request)
+ {
+ $this->send_drop_wm_mapping($request);
+ return $this->recv_drop_wm_mapping();
+ }
- public function __construct($vals=null) {
- if (!isset(self::$_TSPEC)) {
- self::$_TSPEC = array(
- 0 => array(
- 'var' => 'success',
- 'type' => TType::STRING,
- ),
- 1 => array(
- 'var' => 'o1',
- 'type' => TType::STRUCT,
- 'class' => '\metastore\MetaException',
- ),
- );
+ public function send_drop_wm_mapping(\metastore\WMDropMappingRequest $request)
+ {
+ $args = new \metastore\ThriftHiveMetastore_drop_wm_mapping_args();
+ $args->request = $request;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'drop_wm_mapping', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
}
- if (is_array($vals)) {
- if (isset($vals['success'])) {
- $this->success = $vals['success'];
- }
- if (isset($vals['o1'])) {
- $this->o1 = $vals['o1'];
+ else
+ {
+ $this->output_->writeMessageBegin('drop_wm_mapping', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_drop_wm_mapping()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_wm_mapping_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
}
+ $result = new \metastore\ThriftHiveMetastore_drop_wm_mapping_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
}
+ throw new \Exception("drop_wm_mapping failed: unknown result");
}
- public function getName() {
- return 'ThriftHiveMetastore_getMetaConf_result';
+ public function create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request)
+ {
+ $this->send_create_or_drop_wm_trigger_to_pool_mapping($request);
+ return $this->recv_create_or_drop_wm_trigger_to_pool_mapping();
}
- public function read($input)
+ public function send_create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request)
{
- $xfer = 0;
- $fname = null;
- $ftype = 0;
- $fid = 0;
- $xfer += $input->readStructBegin($fname);
- while (true)
+ $args = new \metastore\ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args();
+ $args->request = $request;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
{
- $xfer += $input->readFieldBegin($fname, $ftype, $fid);
- if ($ftype == TType::STOP) {
- break;
- }
- switch ($fid)
- {
- case 0:
- if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->success);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- case 1:
- if ($ftype == TType::STRUCT) {
- $this->o1 = new \metastore\MetaException();
- $xfer += $this->o1->read($input);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- default:
- $xfer += $input->skip($ftype);
- break;
- }
- $xfer += $input->readFieldEnd();
+ thrift_protocol_write_binary($this->output_, 'create_or_drop_wm_trigger_to_pool_mapping', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('create_or_drop_wm_trigger_to_pool_mapping', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
}
- $xfer += $input->readStructEnd();
- return $xfer;
}
- public function write($output) {
- $xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result');
- if ($this->success !== null) {
- $xfer += $output->writeFieldBegin('success', TType::STRING, 0);
- $xfer += $output->writeString($this->success);
- $xfer += $output->writeFieldEnd();
+ public function recv_create_or_drop_wm_trigger_to_pool_mapping()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new \metastore\ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
}
- if ($this->o1 !== null) {
- $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
- $xfer += $this->o1->write($output);
- $xfer += $output->writeFieldEnd();
+ if ($result->success !== null) {
+ return $result->success;
}
- $xfer += $output->writeFieldStop();
- $xfer += $output->writeStructEnd();
- return $xfer;
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ if ($result->o4 !== null) {
+ throw $result->o4;
+ }
+ throw new \Exception("create_or_drop_wm_trigger_to_pool_mapping failed: unknown result");
}
}
-class ThriftHiveMetastore_setMetaConf_args {
+// HELPER FUNCTIONS AND STRUCTURES
+
+class ThriftHiveMetastore_getMetaConf_args {
static $_TSPEC;
/**
* @var string
*/
public $key = null;
- /**
- * @var string
- */
- public $value = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -11357,24 +11602,203 @@ class ThriftHiveMetastore_setMetaConf_args {
'var' => 'key',
'type' => TType::STRING,
),
- 2 => array(
- 'var' => 'value',
- 'type' => TType::STRING,
- ),
);
}
if (is_array($vals)) {
if (isset($vals['key'])) {
$this->key = $vals['key'];
}
- if (isset($vals['value'])) {
- $this->value = $vals['value'];
- }
}
}
public function getName() {
- return 'ThriftHiveMetastore_setMetaConf_args';
+ return 'ThriftHiveMetastore_getMetaConf_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->key);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args');
+ if ($this->key !== null) {
+ $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
+ $xfer += $output->writeString($this->key);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_getMetaConf_result {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $success = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o1 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRING,
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_getMetaConf_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->success);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\MetaException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result');
+ if ($this->success !== null) {
+ $xfer += $output->writeFieldBegin('success', TType::STRING, 0);
+ $xfer += $output->writeString($this->success);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_setMetaConf_args {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $key = null;
+ /**
+ * @var string
+ */
+ public $value = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'key',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'value',
+ 'type' => TType::STRING,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['key'])) {
+ $this->key = $vals['key'];
+ }
+ if (isset($vals['value'])) {
+ $this->value = $vals['value'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_setMetaConf_args';
}
public function read($input)
@@ -48387,11 +48811,1531 @@ class ThriftHiveMetastore_create_resource_plan_result {
}
-class ThriftHiveMetastore_get_resource_plan_args {
+class ThriftHiveMetastore_get_resource_plan_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMGetResourcePlanRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMGetResourcePlanRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_resource_plan_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMGetResourcePlanRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_get_resource_plan_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMGetResourcePlanResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o2 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMGetResourcePlanResponse',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_resource_plan_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMGetResourcePlanResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\MetaException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated argument wrapper for the get_active_resource_plan
+// metastore RPC (HIVE-17954 workload management). Auto-generated code:
+// do not edit by hand -- regenerate from the Thrift IDL instead.
+class ThriftHiveMetastore_get_active_resource_plan_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMGetActiveResourcePlanRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMGetActiveResourcePlanRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_active_resource_plan_args';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMGetActiveResourcePlanRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated result wrapper for get_active_resource_plan.
+// Field id 0 carries the return value; id 1 ('o2') is the declared
+// MetaException. Auto-generated code: do not edit by hand.
+class ThriftHiveMetastore_get_active_resource_plan_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMGetActiveResourcePlanResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o2 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMGetActiveResourcePlanResponse',
+ ),
+ 1 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_active_resource_plan_result';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMGetActiveResourcePlanResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\MetaException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated argument wrapper for the get_all_resource_plans
+// metastore RPC. Auto-generated code: do not edit by hand --
+// regenerate from the Thrift IDL instead.
+class ThriftHiveMetastore_get_all_resource_plans_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMGetAllResourcePlanRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMGetAllResourcePlanRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_all_resource_plans_args';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMGetAllResourcePlanRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated result wrapper for get_all_resource_plans.
+// Field id 0 carries the return value; id 1 ('o1') is the declared
+// MetaException. Auto-generated code: do not edit by hand.
+class ThriftHiveMetastore_get_all_resource_plans_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMGetAllResourcePlanResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o1 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMGetAllResourcePlanResponse',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_all_resource_plans_result';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMGetAllResourcePlanResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\MetaException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated argument wrapper for the alter_resource_plan
+// metastore RPC. Auto-generated code: do not edit by hand --
+// regenerate from the Thrift IDL instead.
+class ThriftHiveMetastore_alter_resource_plan_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMAlterResourcePlanRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMAlterResourcePlanRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_alter_resource_plan_args';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMAlterResourcePlanRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated result wrapper for alter_resource_plan.
+// Field id 0 carries the return value; ids 1-3 are the declared
+// exceptions (NoSuchObject, InvalidOperation, Meta). Auto-generated
+// code: do not edit by hand.
+class ThriftHiveMetastore_alter_resource_plan_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMAlterResourcePlanResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\InvalidOperationException
+ */
+ public $o2 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o3 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMAlterResourcePlanResponse',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\InvalidOperationException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_alter_resource_plan_result';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMAlterResourcePlanResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\InvalidOperationException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\MetaException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated argument wrapper for the validate_resource_plan
+// metastore RPC. Auto-generated code: do not edit by hand --
+// regenerate from the Thrift IDL instead.
+class ThriftHiveMetastore_validate_resource_plan_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMValidateResourcePlanRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMValidateResourcePlanRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_validate_resource_plan_args';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMValidateResourcePlanRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated result wrapper for validate_resource_plan.
+// Field id 0 carries the return value; ids 1-2 are the declared
+// exceptions (NoSuchObject, Meta). Auto-generated code: do not edit
+// by hand.
+class ThriftHiveMetastore_validate_resource_plan_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMValidateResourcePlanResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o2 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMValidateResourcePlanResponse',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_validate_resource_plan_result';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMValidateResourcePlanResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\MetaException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated argument wrapper for the drop_resource_plan
+// metastore RPC. Auto-generated code: do not edit by hand --
+// regenerate from the Thrift IDL instead.
+class ThriftHiveMetastore_drop_resource_plan_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMDropResourcePlanRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMDropResourcePlanRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_drop_resource_plan_args';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMDropResourcePlanRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated result wrapper for drop_resource_plan.
+// Field id 0 carries the return value; ids 1-3 are the declared
+// exceptions (NoSuchObject, InvalidOperation, Meta). Auto-generated
+// code: do not edit by hand.
+class ThriftHiveMetastore_drop_resource_plan_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMDropResourcePlanResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\InvalidOperationException
+ */
+ public $o2 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o3 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMDropResourcePlanResponse',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\InvalidOperationException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_drop_resource_plan_result';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMDropResourcePlanResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\InvalidOperationException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\MetaException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated argument wrapper for the create_wm_trigger
+// metastore RPC. Auto-generated code: do not edit by hand --
+// regenerate from the Thrift IDL instead.
+class ThriftHiveMetastore_create_wm_trigger_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMCreateTriggerRequest
+ */
+ public $request = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'request',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMCreateTriggerRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['request'])) {
+ $this->request = $vals['request'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_create_wm_trigger_args';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->request = new \metastore\WMCreateTriggerRequest();
+ $xfer += $this->request->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_trigger_args');
+ if ($this->request !== null) {
+ if (!is_object($this->request)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
+ $xfer += $this->request->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated result wrapper for create_wm_trigger.
+// Field id 0 carries the return value; ids 1-4 are the declared
+// exceptions (AlreadyExists, NoSuchObject, InvalidObject, Meta).
+// Auto-generated code: do not edit by hand.
+class ThriftHiveMetastore_create_wm_trigger_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMCreateTriggerResponse
+ */
+ public $success = null;
+ /**
+ * @var \metastore\AlreadyExistsException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o2 = null;
+ /**
+ * @var \metastore\InvalidObjectException
+ */
+ public $o3 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o4 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMCreateTriggerResponse',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\AlreadyExistsException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\InvalidObjectException',
+ ),
+ 4 => array(
+ 'var' => 'o4',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
+ if (isset($vals['o4'])) {
+ $this->o4 = $vals['o4'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_create_wm_trigger_result';
+ }
+
+ // Deserializes this struct from the protocol; unrecognized field
+ // ids/types are skipped. Returns the accumulated transfer count.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\WMCreateTriggerResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\AlreadyExistsException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\InvalidObjectException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::STRUCT) {
+ $this->o4 = new \metastore\MetaException();
+ $xfer += $this->o4->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serializes this struct to the protocol; null fields are omitted.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_trigger_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o4 !== null) {
+ $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4);
+ $xfer += $this->o4->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_alter_wm_trigger_args {
static $_TSPEC;
/**
- * @var \metastore\WMGetResourcePlanRequest
+ * @var \metastore\WMAlterTriggerRequest
*/
public $request = null;
@@ -48401,7 +50345,7 @@ class ThriftHiveMetastore_get_resource_plan_args {
1 => array(
'var' => 'request',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMGetResourcePlanRequest',
+ 'class' => '\metastore\WMAlterTriggerRequest',
),
);
}
@@ -48413,7 +50357,7 @@ class ThriftHiveMetastore_get_resource_plan_args {
}
public function getName() {
- return 'ThriftHiveMetastore_get_resource_plan_args';
+ return 'ThriftHiveMetastore_alter_wm_trigger_args';
}
public function read($input)
@@ -48433,7 +50377,7 @@ class ThriftHiveMetastore_get_resource_plan_args {
{
case 1:
if ($ftype == TType::STRUCT) {
- $this->request = new \metastore\WMGetResourcePlanRequest();
+ $this->request = new \metastore\WMAlterTriggerRequest();
$xfer += $this->request->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -48451,7 +50395,7 @@ class ThriftHiveMetastore_get_resource_plan_args {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_args');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_wm_trigger_args');
if ($this->request !== null) {
if (!is_object($this->request)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -48467,11 +50411,11 @@ class ThriftHiveMetastore_get_resource_plan_args {
}
-class ThriftHiveMetastore_get_resource_plan_result {
+class ThriftHiveMetastore_alter_wm_trigger_result {
static $_TSPEC;
/**
- * @var \metastore\WMGetResourcePlanResponse
+ * @var \metastore\WMAlterTriggerResponse
*/
public $success = null;
/**
@@ -48479,9 +50423,13 @@ class ThriftHiveMetastore_get_resource_plan_result {
*/
public $o1 = null;
/**
- * @var \metastore\MetaException
+ * @var \metastore\InvalidObjectException
*/
public $o2 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o3 = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -48489,7 +50437,7 @@ class ThriftHiveMetastore_get_resource_plan_result {
0 => array(
'var' => 'success',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMGetResourcePlanResponse',
+ 'class' => '\metastore\WMAlterTriggerResponse',
),
1 => array(
'var' => 'o1',
@@ -48499,6 +50447,11 @@ class ThriftHiveMetastore_get_resource_plan_result {
2 => array(
'var' => 'o2',
'type' => TType::STRUCT,
+ 'class' => '\metastore\InvalidObjectException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
'class' => '\metastore\MetaException',
),
);
@@ -48513,11 +50466,14 @@ class ThriftHiveMetastore_get_resource_plan_result {
if (isset($vals['o2'])) {
$this->o2 = $vals['o2'];
}
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
}
}
public function getName() {
- return 'ThriftHiveMetastore_get_resource_plan_result';
+ return 'ThriftHiveMetastore_alter_wm_trigger_result';
}
public function read($input)
@@ -48537,7 +50493,7 @@ class ThriftHiveMetastore_get_resource_plan_result {
{
case 0:
if ($ftype == TType::STRUCT) {
- $this->success = new \metastore\WMGetResourcePlanResponse();
+ $this->success = new \metastore\WMAlterTriggerResponse();
$xfer += $this->success->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -48553,12 +50509,20 @@ class ThriftHiveMetastore_get_resource_plan_result {
break;
case 2:
if ($ftype == TType::STRUCT) {
- $this->o2 = new \metastore\MetaException();
+ $this->o2 = new \metastore\InvalidObjectException();
$xfer += $this->o2->read($input);
} else {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\MetaException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -48571,7 +50535,7 @@ class ThriftHiveMetastore_get_resource_plan_result {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_result');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_wm_trigger_result');
if ($this->success !== null) {
if (!is_object($this->success)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -48590,6 +50554,11 @@ class ThriftHiveMetastore_get_resource_plan_result {
$xfer += $this->o2->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -48597,11 +50566,11 @@ class ThriftHiveMetastore_get_resource_plan_result {
}
-class ThriftHiveMetastore_get_active_resource_plan_args {
+class ThriftHiveMetastore_drop_wm_trigger_args {
static $_TSPEC;
/**
- * @var \metastore\WMGetActiveResourcePlanRequest
+ * @var \metastore\WMDropTriggerRequest
*/
public $request = null;
@@ -48611,7 +50580,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args {
1 => array(
'var' => 'request',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMGetActiveResourcePlanRequest',
+ 'class' => '\metastore\WMDropTriggerRequest',
),
);
}
@@ -48623,7 +50592,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args {
}
public function getName() {
- return 'ThriftHiveMetastore_get_active_resource_plan_args';
+ return 'ThriftHiveMetastore_drop_wm_trigger_args';
}
public function read($input)
@@ -48643,7 +50612,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args {
{
case 1:
if ($ftype == TType::STRUCT) {
- $this->request = new \metastore\WMGetActiveResourcePlanRequest();
+ $this->request = new \metastore\WMDropTriggerRequest();
$xfer += $this->request->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -48661,7 +50630,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_args');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_trigger_args');
if ($this->request !== null) {
if (!is_object($this->request)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -48677,17 +50646,25 @@ class ThriftHiveMetastore_get_active_resource_plan_args {
}
-class ThriftHiveMetastore_get_active_resource_plan_result {
+class ThriftHiveMetastore_drop_wm_trigger_result {
static $_TSPEC;
/**
- * @var \metastore\WMGetActiveResourcePlanResponse
+ * @var \metastore\WMDropTriggerResponse
*/
public $success = null;
/**
- * @var \metastore\MetaException
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\InvalidOperationException
*/
public $o2 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o3 = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -48695,11 +50672,21 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
0 => array(
'var' => 'success',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMGetActiveResourcePlanResponse',
+ 'class' => '\metastore\WMDropTriggerResponse',
),
1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
'var' => 'o2',
'type' => TType::STRUCT,
+ 'class' => '\metastore\InvalidOperationException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
'class' => '\metastore\MetaException',
),
);
@@ -48708,14 +50695,20 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
if (isset($vals['success'])) {
$this->success = $vals['success'];
}
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
if (isset($vals['o2'])) {
$this->o2 = $vals['o2'];
}
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
}
}
public function getName() {
- return 'ThriftHiveMetastore_get_active_resource_plan_result';
+ return 'ThriftHiveMetastore_drop_wm_trigger_result';
}
public function read($input)
@@ -48735,7 +50728,7 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
{
case 0:
if ($ftype == TType::STRUCT) {
- $this->success = new \metastore\WMGetActiveResourcePlanResponse();
+ $this->success = new \metastore\WMDropTriggerResponse();
$xfer += $this->success->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -48743,12 +50736,28 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
break;
case 1:
if ($ftype == TType::STRUCT) {
- $this->o2 = new \metastore\MetaException();
+ $this->o1 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\InvalidOperationException();
$xfer += $this->o2->read($input);
} else {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\MetaException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -48761,7 +50770,7 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_result');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_trigger_result');
if ($this->success !== null) {
if (!is_object($this->success)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -48770,11 +50779,21 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
$xfer += $this->success->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
if ($this->o2 !== null) {
- $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1);
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
$xfer += $this->o2->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -48782,11 +50801,11 @@ class ThriftHiveMetastore_get_active_resource_plan_result {
}
-class ThriftHiveMetastore_get_all_resource_plans_args {
+class ThriftHiveMetastore_get_triggers_for_resourceplan_args {
static $_TSPEC;
/**
- * @var \metastore\WMGetAllResourcePlanRequest
+ * @var \metastore\WMGetTriggersForResourePlanRequest
*/
public $request = null;
@@ -48796,7 +50815,7 @@ class ThriftHiveMetastore_get_all_resource_plans_args {
1 => array(
'var' => 'request',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMGetAllResourcePlanRequest',
+ 'class' => '\metastore\WMGetTriggersForResourePlanRequest',
),
);
}
@@ -48808,7 +50827,7 @@ class ThriftHiveMetastore_get_all_resource_plans_args {
}
public function getName() {
- return 'ThriftHiveMetastore_get_all_resource_plans_args';
+ return 'ThriftHiveMetastore_get_triggers_for_resourceplan_args';
}
public function read($input)
@@ -48828,7 +50847,7 @@ class ThriftHiveMetastore_get_all_resource_plans_args {
{
case 1:
if ($ftype == TType::STRUCT) {
- $this->request = new \metastore\WMGetAllResourcePlanRequest();
+ $this->request = new \metastore\WMGetTriggersForResourePlanRequest();
$xfer += $this->request->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -48846,7 +50865,7 @@ class ThriftHiveMetastore_get_all_resource_plans_args {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_args');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_triggers_for_resourceplan_args');
if ($this->request !== null) {
if (!is_object($this->request)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -48862,17 +50881,21 @@ class ThriftHiveMetastore_get_all_resource_plans_args {
}
-class ThriftHiveMetastore_get_all_resource_plans_result {
+class ThriftHiveMetastore_get_triggers_for_resourceplan_result {
static $_TSPEC;
/**
- * @var \metastore\WMGetAllResourcePlanResponse
+ * @var \metastore\WMGetTriggersForResourePlanResponse
*/
public $success = null;
/**
- * @var \metastore\MetaException
+ * @var \metastore\NoSuchObjectException
*/
public $o1 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o2 = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -48880,11 +50903,16 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
0 => array(
'var' => 'success',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMGetAllResourcePlanResponse',
+ 'class' => '\metastore\WMGetTriggersForResourePlanResponse',
),
1 => array(
'var' => 'o1',
'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
'class' => '\metastore\MetaException',
),
);
@@ -48896,11 +50924,14 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
if (isset($vals['o1'])) {
$this->o1 = $vals['o1'];
}
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
}
}
public function getName() {
- return 'ThriftHiveMetastore_get_all_resource_plans_result';
+ return 'ThriftHiveMetastore_get_triggers_for_resourceplan_result';
}
public function read($input)
@@ -48920,7 +50951,7 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
{
case 0:
if ($ftype == TType::STRUCT) {
- $this->success = new \metastore\WMGetAllResourcePlanResponse();
+ $this->success = new \metastore\WMGetTriggersForResourePlanResponse();
$xfer += $this->success->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -48928,12 +50959,20 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
break;
case 1:
if ($ftype == TType::STRUCT) {
- $this->o1 = new \metastore\MetaException();
+ $this->o1 = new \metastore\NoSuchObjectException();
$xfer += $this->o1->read($input);
} else {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\MetaException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -48946,7 +50985,7 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_result');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_triggers_for_resourceplan_result');
if ($this->success !== null) {
if (!is_object($this->success)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -48960,6 +50999,11 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
$xfer += $this->o1->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -48967,11 +51011,11 @@ class ThriftHiveMetastore_get_all_resource_plans_result {
}
-class ThriftHiveMetastore_alter_resource_plan_args {
+class ThriftHiveMetastore_create_wm_pool_args {
static $_TSPEC;
/**
- * @var \metastore\WMAlterResourcePlanRequest
+ * @var \metastore\WMCreatePoolRequest
*/
public $request = null;
@@ -48981,7 +51025,7 @@ class ThriftHiveMetastore_alter_resource_plan_args {
1 => array(
'var' => 'request',
'type' => TType::STRUCT,
- 'class' => '\metastore\WMAlterResourcePlanRequest',
+ 'class' => '\metastore\WMCreatePoolRequest',
),
);
}
@@ -48993,7 +51037,7 @@ class ThriftHiveMetastore_alter_resource_plan_args {
}
public function getName() {
- return 'ThriftHiveMetastore_alter_resource_plan_args';
+ return 'ThriftHiveMetastore_create_wm_pool_args';
}
public function read($input)
@@ -49013,7 +51057,7 @@ class ThriftHiveMetastore_alter_resource_plan_args {
{
case 1:
if ($ftype == TType::STRUCT) {
- $this->request = new \metastore\WMAlterResourcePlanRequest();
+ $this->request = new \metastore\WMCreatePoolRequest();
$xfer += $this->request->read($input);
} else {
$xfer += $input->skip($ftype);
@@ -49031,7 +51075,7 @@ class ThriftHiveMetastore_alter_resource_plan_args {
public function write($output) {
$xfer = 0;
- $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_args');
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_pool_args');
if ($this->request !== null) {
if (!is_object($this->request)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
@@ -49047,25 +51091,29 @@ class ThriftHiveMetastore_alter_resource_plan_args {
}
-class ThriftHiveMetastore_alter_resource_plan_result {
+class ThriftHiveMetastore_create_wm_pool_result {
static $_TSPEC;
/**
- * @var \metastore\WMAlterResourcePlanResponse
+ * @var \metastore\WMCreatePoolResponse
*/
public $success = null;
/**
- * @var \metastore\NoSuchObjectException
+ * @var \metastore\AlreadyExistsException
*/
public $o1 = null;
/**
- * @var \metastore\InvalidOperationException
+ * @var \metastore\NoSuchObjectException
*/
public $o2 = null;
/**
- * @var \metastore\MetaException
+ * @var \metastore\InvalidObjectException
*/
public $o3 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o4 = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -49073,21 +51121,26 @@ class ThriftHiveMetastore_alter_resource_plan_result {
0 => array(
'var' => 'success',
'type' => TType::STRUCT,
-
<TRUNCATED>
[09/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index c0ad739..8524dbd 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -21281,9 +21281,9 @@ void WMMapping::__set_entityName(const std::string& val) {
this->entityName = val;
}
-void WMMapping::__set_poolName(const std::string& val) {
- this->poolName = val;
-__isset.poolName = true;
+void WMMapping::__set_poolPath(const std::string& val) {
+ this->poolPath = val;
+__isset.poolPath = true;
}
void WMMapping::__set_ordering(const int32_t val) {
@@ -21341,8 +21341,8 @@ uint32_t WMMapping::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 4:
if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->poolName);
- this->__isset.poolName = true;
+ xfer += iprot->readString(this->poolPath);
+ this->__isset.poolPath = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -21390,9 +21390,9 @@ uint32_t WMMapping::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeString(this->entityName);
xfer += oprot->writeFieldEnd();
- if (this->__isset.poolName) {
- xfer += oprot->writeFieldBegin("poolName", ::apache::thrift::protocol::T_STRING, 4);
- xfer += oprot->writeString(this->poolName);
+ if (this->__isset.poolPath) {
+ xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->poolPath);
xfer += oprot->writeFieldEnd();
}
if (this->__isset.ordering) {
@@ -21410,7 +21410,7 @@ void swap(WMMapping &a, WMMapping &b) {
swap(a.resourcePlanName, b.resourcePlanName);
swap(a.entityType, b.entityType);
swap(a.entityName, b.entityName);
- swap(a.poolName, b.poolName);
+ swap(a.poolPath, b.poolPath);
swap(a.ordering, b.ordering);
swap(a.__isset, b.__isset);
}
@@ -21419,7 +21419,7 @@ WMMapping::WMMapping(const WMMapping& other870) {
resourcePlanName = other870.resourcePlanName;
entityType = other870.entityType;
entityName = other870.entityName;
- poolName = other870.poolName;
+ poolPath = other870.poolPath;
ordering = other870.ordering;
__isset = other870.__isset;
}
@@ -21427,7 +21427,7 @@ WMMapping& WMMapping::operator=(const WMMapping& other871) {
resourcePlanName = other871.resourcePlanName;
entityType = other871.entityType;
entityName = other871.entityName;
- poolName = other871.poolName;
+ poolPath = other871.poolPath;
ordering = other871.ordering;
__isset = other871.__isset;
return *this;
@@ -21438,7 +21438,7 @@ void WMMapping::printTo(std::ostream& out) const {
out << "resourcePlanName=" << to_string(resourcePlanName);
out << ", " << "entityType=" << to_string(entityType);
out << ", " << "entityName=" << to_string(entityName);
- out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName)) : (out << "<null>"));
+ out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "<null>"));
out << ", " << "ordering="; (__isset.ordering ? (out << to_string(ordering)) : (out << "<null>"));
out << ")";
}
@@ -23671,15 +23671,16 @@ void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const {
}
-MetaException::~MetaException() throw() {
+WMCreatePoolRequest::~WMCreatePoolRequest() throw() {
}
-void MetaException::__set_message(const std::string& val) {
- this->message = val;
+void WMCreatePoolRequest::__set_pool(const WMPool& val) {
+ this->pool = val;
+__isset.pool = true;
}
-uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t WMCreatePoolRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
@@ -23701,9 +23702,9 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) {
switch (fid)
{
case 1:
- if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->message);
- this->__isset.message = true;
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->pool.read(iprot);
+ this->__isset.pool = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -23720,63 +23721,49 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) {
return xfer;
}
-uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t WMCreatePoolRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("MetaException");
-
- xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->message);
- xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeStructBegin("WMCreatePoolRequest");
+ if (this->__isset.pool) {
+ xfer += oprot->writeFieldBegin("pool", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->pool.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
-void swap(MetaException &a, MetaException &b) {
+void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) {
using ::std::swap;
- swap(a.message, b.message);
+ swap(a.pool, b.pool);
swap(a.__isset, b.__isset);
}
-MetaException::MetaException(const MetaException& other956) : TException() {
- message = other956.message;
+WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other956) {
+ pool = other956.pool;
__isset = other956.__isset;
}
-MetaException& MetaException::operator=(const MetaException& other957) {
- message = other957.message;
+WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other957) {
+ pool = other957.pool;
__isset = other957.__isset;
return *this;
}
-void MetaException::printTo(std::ostream& out) const {
+void WMCreatePoolRequest::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
- out << "MetaException(";
- out << "message=" << to_string(message);
+ out << "WMCreatePoolRequest(";
+ out << "pool="; (__isset.pool ? (out << to_string(pool)) : (out << "<null>"));
out << ")";
}
-const char* MetaException::what() const throw() {
- try {
- std::stringstream ss;
- ss << "TException - service has thrown: " << *this;
- this->thriftTExceptionMessageHolder_ = ss.str();
- return this->thriftTExceptionMessageHolder_.c_str();
- } catch (const std::exception&) {
- return "TException - service has thrown: MetaException";
- }
-}
-
-UnknownTableException::~UnknownTableException() throw() {
+WMCreatePoolResponse::~WMCreatePoolResponse() throw() {
}
-void UnknownTableException::__set_message(const std::string& val) {
- this->message = val;
-}
-
-uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t WMCreatePoolResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
@@ -23795,20 +23782,7 @@ uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* ipro
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
- switch (fid)
- {
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->message);
- this->__isset.message = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
+ xfer += iprot->skip(ftype);
xfer += iprot->readFieldEnd();
}
@@ -23817,63 +23791,51 @@ uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* ipro
return xfer;
}
-uint32_t UnknownTableException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t WMCreatePoolResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("UnknownTableException");
-
- xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->message);
- xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeStructBegin("WMCreatePoolResponse");
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
-void swap(UnknownTableException &a, UnknownTableException &b) {
+void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) {
using ::std::swap;
- swap(a.message, b.message);
- swap(a.__isset, b.__isset);
+ (void) a;
+ (void) b;
}
-UnknownTableException::UnknownTableException(const UnknownTableException& other958) : TException() {
- message = other958.message;
- __isset = other958.__isset;
+WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other958) {
+ (void) other958;
}
-UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other959) {
- message = other959.message;
- __isset = other959.__isset;
+WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other959) {
+ (void) other959;
return *this;
}
-void UnknownTableException::printTo(std::ostream& out) const {
+void WMCreatePoolResponse::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
- out << "UnknownTableException(";
- out << "message=" << to_string(message);
+ out << "WMCreatePoolResponse(";
out << ")";
}
-const char* UnknownTableException::what() const throw() {
- try {
- std::stringstream ss;
- ss << "TException - service has thrown: " << *this;
- this->thriftTExceptionMessageHolder_ = ss.str();
- return this->thriftTExceptionMessageHolder_.c_str();
- } catch (const std::exception&) {
- return "TException - service has thrown: UnknownTableException";
- }
+
+WMAlterPoolRequest::~WMAlterPoolRequest() throw() {
}
-UnknownDBException::~UnknownDBException() throw() {
+void WMAlterPoolRequest::__set_pool(const WMPool& val) {
+ this->pool = val;
+__isset.pool = true;
}
-
-void UnknownDBException::__set_message(const std::string& val) {
- this->message = val;
+void WMAlterPoolRequest::__set_poolPath(const std::string& val) {
+ this->poolPath = val;
+__isset.poolPath = true;
}
-uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t WMAlterPoolRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
@@ -23895,9 +23857,17 @@ uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot)
switch (fid)
{
case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->pool.read(iprot);
+ this->__isset.pool = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->message);
- this->__isset.message = true;
+ xfer += iprot->readString(this->poolPath);
+ this->__isset.poolPath = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -23914,63 +23884,58 @@ uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot)
return xfer;
}
-uint32_t UnknownDBException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t WMAlterPoolRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("UnknownDBException");
-
- xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->message);
- xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeStructBegin("WMAlterPoolRequest");
+ if (this->__isset.pool) {
+ xfer += oprot->writeFieldBegin("pool", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->pool.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.poolPath) {
+ xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->poolPath);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
-void swap(UnknownDBException &a, UnknownDBException &b) {
+void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) {
using ::std::swap;
- swap(a.message, b.message);
+ swap(a.pool, b.pool);
+ swap(a.poolPath, b.poolPath);
swap(a.__isset, b.__isset);
}
-UnknownDBException::UnknownDBException(const UnknownDBException& other960) : TException() {
- message = other960.message;
+WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other960) {
+ pool = other960.pool;
+ poolPath = other960.poolPath;
__isset = other960.__isset;
}
-UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other961) {
- message = other961.message;
+WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other961) {
+ pool = other961.pool;
+ poolPath = other961.poolPath;
__isset = other961.__isset;
return *this;
}
-void UnknownDBException::printTo(std::ostream& out) const {
+void WMAlterPoolRequest::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
- out << "UnknownDBException(";
- out << "message=" << to_string(message);
+ out << "WMAlterPoolRequest(";
+ out << "pool="; (__isset.pool ? (out << to_string(pool)) : (out << "<null>"));
+ out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "<null>"));
out << ")";
}
-const char* UnknownDBException::what() const throw() {
- try {
- std::stringstream ss;
- ss << "TException - service has thrown: " << *this;
- this->thriftTExceptionMessageHolder_ = ss.str();
- return this->thriftTExceptionMessageHolder_.c_str();
- } catch (const std::exception&) {
- return "TException - service has thrown: UnknownDBException";
- }
-}
-
-AlreadyExistsException::~AlreadyExistsException() throw() {
+WMAlterPoolResponse::~WMAlterPoolResponse() throw() {
}
-void AlreadyExistsException::__set_message(const std::string& val) {
- this->message = val;
-}
-
-uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t WMAlterPoolResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
@@ -23989,20 +23954,7 @@ uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* ipr
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
- switch (fid)
- {
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->message);
- this->__isset.message = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
+ xfer += iprot->skip(ftype);
xfer += iprot->readFieldEnd();
}
@@ -24011,63 +23963,51 @@ uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* ipr
return xfer;
}
-uint32_t AlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t WMAlterPoolResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("AlreadyExistsException");
-
- xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->message);
- xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeStructBegin("WMAlterPoolResponse");
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
-void swap(AlreadyExistsException &a, AlreadyExistsException &b) {
+void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) {
using ::std::swap;
- swap(a.message, b.message);
- swap(a.__isset, b.__isset);
+ (void) a;
+ (void) b;
}
-AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other962) : TException() {
- message = other962.message;
- __isset = other962.__isset;
+WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other962) {
+ (void) other962;
}
-AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other963) {
- message = other963.message;
- __isset = other963.__isset;
+WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other963) {
+ (void) other963;
return *this;
}
-void AlreadyExistsException::printTo(std::ostream& out) const {
+void WMAlterPoolResponse::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
- out << "AlreadyExistsException(";
- out << "message=" << to_string(message);
+ out << "WMAlterPoolResponse(";
out << ")";
}
-const char* AlreadyExistsException::what() const throw() {
- try {
- std::stringstream ss;
- ss << "TException - service has thrown: " << *this;
- this->thriftTExceptionMessageHolder_ = ss.str();
- return this->thriftTExceptionMessageHolder_.c_str();
- } catch (const std::exception&) {
- return "TException - service has thrown: AlreadyExistsException";
- }
+
+WMDropPoolRequest::~WMDropPoolRequest() throw() {
}
-InvalidPartitionException::~InvalidPartitionException() throw() {
+void WMDropPoolRequest::__set_resourcePlanName(const std::string& val) {
+ this->resourcePlanName = val;
+__isset.resourcePlanName = true;
}
-
-void InvalidPartitionException::__set_message(const std::string& val) {
- this->message = val;
+void WMDropPoolRequest::__set_poolPath(const std::string& val) {
+ this->poolPath = val;
+__isset.poolPath = true;
}
-uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t WMDropPoolRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
@@ -24090,8 +24030,16 @@ uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol*
{
case 1:
if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->message);
- this->__isset.message = true;
+ xfer += iprot->readString(this->resourcePlanName);
+ this->__isset.resourcePlanName = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->poolPath);
+ this->__isset.poolPath = true;
} else {
xfer += iprot->skip(ftype);
}
@@ -24108,63 +24056,58 @@ uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol*
return xfer;
}
-uint32_t InvalidPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t WMDropPoolRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("InvalidPartitionException");
-
- xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->message);
- xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeStructBegin("WMDropPoolRequest");
+ if (this->__isset.resourcePlanName) {
+ xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->resourcePlanName);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.poolPath) {
+ xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->poolPath);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
-void swap(InvalidPartitionException &a, InvalidPartitionException &b) {
+void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) {
using ::std::swap;
- swap(a.message, b.message);
+ swap(a.resourcePlanName, b.resourcePlanName);
+ swap(a.poolPath, b.poolPath);
swap(a.__isset, b.__isset);
}
-InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other964) : TException() {
- message = other964.message;
+WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other964) {
+ resourcePlanName = other964.resourcePlanName;
+ poolPath = other964.poolPath;
__isset = other964.__isset;
}
-InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other965) {
- message = other965.message;
+WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other965) {
+ resourcePlanName = other965.resourcePlanName;
+ poolPath = other965.poolPath;
__isset = other965.__isset;
return *this;
}
-void InvalidPartitionException::printTo(std::ostream& out) const {
+void WMDropPoolRequest::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
- out << "InvalidPartitionException(";
- out << "message=" << to_string(message);
+ out << "WMDropPoolRequest(";
+ out << "resourcePlanName="; (__isset.resourcePlanName ? (out << to_string(resourcePlanName)) : (out << "<null>"));
+ out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "<null>"));
out << ")";
}
-const char* InvalidPartitionException::what() const throw() {
- try {
- std::stringstream ss;
- ss << "TException - service has thrown: " << *this;
- this->thriftTExceptionMessageHolder_ = ss.str();
- return this->thriftTExceptionMessageHolder_.c_str();
- } catch (const std::exception&) {
- return "TException - service has thrown: InvalidPartitionException";
- }
-}
-
-UnknownPartitionException::~UnknownPartitionException() throw() {
+WMDropPoolResponse::~WMDropPoolResponse() throw() {
}
-void UnknownPartitionException::__set_message(const std::string& val) {
- this->message = val;
-}
-
-uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t WMDropPoolResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
@@ -24183,20 +24126,7 @@ uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
- switch (fid)
- {
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->message);
- this->__isset.message = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
+ xfer += iprot->skip(ftype);
xfer += iprot->readFieldEnd();
}
@@ -24205,38 +24135,1140 @@ uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol*
return xfer;
}
-uint32_t UnknownPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t WMDropPoolResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("UnknownPartitionException");
-
- xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->message);
- xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeStructBegin("WMDropPoolResponse");
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
-void swap(UnknownPartitionException &a, UnknownPartitionException &b) {
+void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) {
using ::std::swap;
- swap(a.message, b.message);
- swap(a.__isset, b.__isset);
+ (void) a;
+ (void) b;
}
-UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other966) : TException() {
- message = other966.message;
- __isset = other966.__isset;
+WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other966) {
+ (void) other966;
}
-UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other967) {
- message = other967.message;
- __isset = other967.__isset;
+WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other967) {
+ (void) other967;
return *this;
}
-void UnknownPartitionException::printTo(std::ostream& out) const {
+void WMDropPoolResponse::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
- out << "UnknownPartitionException(";
+ out << "WMDropPoolResponse(";
+ out << ")";
+}
+
+
+WMCreateOrUpdateMappingRequest::~WMCreateOrUpdateMappingRequest() throw() {
+}
+
+
+void WMCreateOrUpdateMappingRequest::__set_mapping(const WMMapping& val) {
+ this->mapping = val;
+__isset.mapping = true;
+}
+
+void WMCreateOrUpdateMappingRequest::__set_update(const bool val) {
+ this->update = val;
+__isset.update = true;
+}
+
+uint32_t WMCreateOrUpdateMappingRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->mapping.read(iprot);
+ this->__isset.mapping = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->update);
+ this->__isset.update = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t WMCreateOrUpdateMappingRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("WMCreateOrUpdateMappingRequest");
+
+ if (this->__isset.mapping) {
+ xfer += oprot->writeFieldBegin("mapping", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->mapping.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.update) {
+ xfer += oprot->writeFieldBegin("update", ::apache::thrift::protocol::T_BOOL, 2);
+ xfer += oprot->writeBool(this->update);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) {
+ using ::std::swap;
+ swap(a.mapping, b.mapping);
+ swap(a.update, b.update);
+ swap(a.__isset, b.__isset);
+}
+
+WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other968) {
+ mapping = other968.mapping;
+ update = other968.update;
+ __isset = other968.__isset;
+}
+WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other969) {
+ mapping = other969.mapping;
+ update = other969.update;
+ __isset = other969.__isset;
+ return *this;
+}
+void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "WMCreateOrUpdateMappingRequest(";
+ out << "mapping="; (__isset.mapping ? (out << to_string(mapping)) : (out << "<null>"));
+ out << ", " << "update="; (__isset.update ? (out << to_string(update)) : (out << "<null>"));
+ out << ")";
+}
+
+
+WMCreateOrUpdateMappingResponse::~WMCreateOrUpdateMappingResponse() throw() {
+}
+
+
+uint32_t WMCreateOrUpdateMappingResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ xfer += iprot->skip(ftype);
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t WMCreateOrUpdateMappingResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("WMCreateOrUpdateMappingResponse");
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b) {
+ using ::std::swap;
+ (void) a;
+ (void) b;
+}
+
+WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other970) {
+ (void) other970;
+}
+WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other971) {
+ (void) other971;
+ return *this;
+}
+void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "WMCreateOrUpdateMappingResponse(";
+ out << ")";
+}
+
+
+WMDropMappingRequest::~WMDropMappingRequest() throw() {
+}
+
+
+void WMDropMappingRequest::__set_mapping(const WMMapping& val) {
+ this->mapping = val;
+__isset.mapping = true;
+}
+
+uint32_t WMDropMappingRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->mapping.read(iprot);
+ this->__isset.mapping = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t WMDropMappingRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("WMDropMappingRequest");
+
+ if (this->__isset.mapping) {
+ xfer += oprot->writeFieldBegin("mapping", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->mapping.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) {
+ using ::std::swap;
+ swap(a.mapping, b.mapping);
+ swap(a.__isset, b.__isset);
+}
+
+WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other972) {
+ mapping = other972.mapping;
+ __isset = other972.__isset;
+}
+WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other973) {
+ mapping = other973.mapping;
+ __isset = other973.__isset;
+ return *this;
+}
+void WMDropMappingRequest::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "WMDropMappingRequest(";
+ out << "mapping="; (__isset.mapping ? (out << to_string(mapping)) : (out << "<null>"));
+ out << ")";
+}
+
+
+WMDropMappingResponse::~WMDropMappingResponse() throw() {
+}
+
+
+uint32_t WMDropMappingResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ xfer += iprot->skip(ftype);
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t WMDropMappingResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("WMDropMappingResponse");
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) {
+ using ::std::swap;
+ (void) a;
+ (void) b;
+}
+
+WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other974) {
+ (void) other974;
+}
+WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other975) {
+ (void) other975;
+ return *this;
+}
+void WMDropMappingResponse::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "WMDropMappingResponse(";
+ out << ")";
+}
+
+
+WMCreateOrDropTriggerToPoolMappingRequest::~WMCreateOrDropTriggerToPoolMappingRequest() throw() {
+}
+
+
+void WMCreateOrDropTriggerToPoolMappingRequest::__set_resourcePlanName(const std::string& val) {
+ this->resourcePlanName = val;
+__isset.resourcePlanName = true;
+}
+
+void WMCreateOrDropTriggerToPoolMappingRequest::__set_triggerName(const std::string& val) {
+ this->triggerName = val;
+__isset.triggerName = true;
+}
+
+void WMCreateOrDropTriggerToPoolMappingRequest::__set_poolPath(const std::string& val) {
+ this->poolPath = val;
+__isset.poolPath = true;
+}
+
+void WMCreateOrDropTriggerToPoolMappingRequest::__set_drop(const bool val) {
+ this->drop = val;
+__isset.drop = true;
+}
+
+uint32_t WMCreateOrDropTriggerToPoolMappingRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->resourcePlanName);
+ this->__isset.resourcePlanName = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->triggerName);
+ this->__isset.triggerName = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->poolPath);
+ this->__isset.poolPath = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->drop);
+ this->__isset.drop = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t WMCreateOrDropTriggerToPoolMappingRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("WMCreateOrDropTriggerToPoolMappingRequest");
+
+ if (this->__isset.resourcePlanName) {
+ xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->resourcePlanName);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.triggerName) {
+ xfer += oprot->writeFieldBegin("triggerName", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->triggerName);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.poolPath) {
+ xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->poolPath);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.drop) {
+ xfer += oprot->writeFieldBegin("drop", ::apache::thrift::protocol::T_BOOL, 4);
+ xfer += oprot->writeBool(this->drop);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToPoolMappingRequest &b) {
+ using ::std::swap;
+ swap(a.resourcePlanName, b.resourcePlanName);
+ swap(a.triggerName, b.triggerName);
+ swap(a.poolPath, b.poolPath);
+ swap(a.drop, b.drop);
+ swap(a.__isset, b.__isset);
+}
+
+WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other976) {
+ resourcePlanName = other976.resourcePlanName;
+ triggerName = other976.triggerName;
+ poolPath = other976.poolPath;
+ drop = other976.drop;
+ __isset = other976.__isset;
+}
+WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other977) {
+ resourcePlanName = other977.resourcePlanName;
+ triggerName = other977.triggerName;
+ poolPath = other977.poolPath;
+ drop = other977.drop;
+ __isset = other977.__isset;
+ return *this;
+}
+void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "WMCreateOrDropTriggerToPoolMappingRequest(";
+ out << "resourcePlanName="; (__isset.resourcePlanName ? (out << to_string(resourcePlanName)) : (out << "<null>"));
+ out << ", " << "triggerName="; (__isset.triggerName ? (out << to_string(triggerName)) : (out << "<null>"));
+ out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "<null>"));
+ out << ", " << "drop="; (__isset.drop ? (out << to_string(drop)) : (out << "<null>"));
+ out << ")";
+}
+
+
+WMCreateOrDropTriggerToPoolMappingResponse::~WMCreateOrDropTriggerToPoolMappingResponse() throw() {
+}
+
+
+uint32_t WMCreateOrDropTriggerToPoolMappingResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ xfer += iprot->skip(ftype);
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t WMCreateOrDropTriggerToPoolMappingResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("WMCreateOrDropTriggerToPoolMappingResponse");
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerToPoolMappingResponse &b) {
+ using ::std::swap;
+ (void) a;
+ (void) b;
+}
+
+WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other978) {
+ (void) other978;
+}
+WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other979) {
+ (void) other979;
+ return *this;
+}
+void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "WMCreateOrDropTriggerToPoolMappingResponse(";
+ out << ")";
+}
+
+
+MetaException::~MetaException() throw() {
+}
+
+
+void MetaException::__set_message(const std::string& val) {
+ this->message = val;
+}
+
+uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->message);
+ this->__isset.message = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("MetaException");
+
+ xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->message);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(MetaException &a, MetaException &b) {
+ using ::std::swap;
+ swap(a.message, b.message);
+ swap(a.__isset, b.__isset);
+}
+
+MetaException::MetaException(const MetaException& other980) : TException() {
+ message = other980.message;
+ __isset = other980.__isset;
+}
+MetaException& MetaException::operator=(const MetaException& other981) {
+ message = other981.message;
+ __isset = other981.__isset;
+ return *this;
+}
+void MetaException::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "MetaException(";
+ out << "message=" << to_string(message);
+ out << ")";
+}
+
+const char* MetaException::what() const throw() {
+ try {
+ std::stringstream ss;
+ ss << "TException - service has thrown: " << *this;
+ this->thriftTExceptionMessageHolder_ = ss.str();
+ return this->thriftTExceptionMessageHolder_.c_str();
+ } catch (const std::exception&) {
+ return "TException - service has thrown: MetaException";
+ }
+}
+
+
+UnknownTableException::~UnknownTableException() throw() {
+}
+
+
+void UnknownTableException::__set_message(const std::string& val) {
+ this->message = val;
+}
+
+uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->message);
+ this->__isset.message = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t UnknownTableException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("UnknownTableException");
+
+ xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->message);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(UnknownTableException &a, UnknownTableException &b) {
+ using ::std::swap;
+ swap(a.message, b.message);
+ swap(a.__isset, b.__isset);
+}
+
+UnknownTableException::UnknownTableException(const UnknownTableException& other982) : TException() {
+ message = other982.message;
+ __isset = other982.__isset;
+}
+UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other983) {
+ message = other983.message;
+ __isset = other983.__isset;
+ return *this;
+}
+void UnknownTableException::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "UnknownTableException(";
+ out << "message=" << to_string(message);
+ out << ")";
+}
+
+const char* UnknownTableException::what() const throw() {
+ try {
+ std::stringstream ss;
+ ss << "TException - service has thrown: " << *this;
+ this->thriftTExceptionMessageHolder_ = ss.str();
+ return this->thriftTExceptionMessageHolder_.c_str();
+ } catch (const std::exception&) {
+ return "TException - service has thrown: UnknownTableException";
+ }
+}
+
+
+UnknownDBException::~UnknownDBException() throw() {
+}
+
+
+void UnknownDBException::__set_message(const std::string& val) {
+ this->message = val;
+}
+
+uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->message);
+ this->__isset.message = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t UnknownDBException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("UnknownDBException");
+
+ xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->message);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(UnknownDBException &a, UnknownDBException &b) {
+ using ::std::swap;
+ swap(a.message, b.message);
+ swap(a.__isset, b.__isset);
+}
+
+UnknownDBException::UnknownDBException(const UnknownDBException& other984) : TException() {
+ message = other984.message;
+ __isset = other984.__isset;
+}
+UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other985) {
+ message = other985.message;
+ __isset = other985.__isset;
+ return *this;
+}
+void UnknownDBException::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "UnknownDBException(";
+ out << "message=" << to_string(message);
+ out << ")";
+}
+
+const char* UnknownDBException::what() const throw() {
+ try {
+ std::stringstream ss;
+ ss << "TException - service has thrown: " << *this;
+ this->thriftTExceptionMessageHolder_ = ss.str();
+ return this->thriftTExceptionMessageHolder_.c_str();
+ } catch (const std::exception&) {
+ return "TException - service has thrown: UnknownDBException";
+ }
+}
+
+
+// ---- Thrift-generated implementation of AlreadyExistsException ----
+// Single optional field: message (field id 1, string). Do not edit by hand;
+// regenerate from the Thrift IDL instead.
+AlreadyExistsException::~AlreadyExistsException() throw() {
+}
+
+
+// Setter for the message field (does not touch __isset; callers that need
+// the flag set use the generated read path or assign __isset directly).
+void AlreadyExistsException::__set_message(const std::string& val) {
+ this->message = val;
+}
+
+// Deserializes this struct from the protocol. Unknown field ids and
+// mismatched wire types are skipped, which is what gives Thrift its
+// forward/backward compatibility. Returns the number of bytes consumed.
+uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->message);
+ this->__isset.message = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+// Serializes this struct to the protocol; message is always written
+// (field id 1). Returns the number of bytes produced.
+uint32_t AlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("AlreadyExistsException");
+
+ xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->message);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+// Member-wise swap (ADL-friendly free function).
+void swap(AlreadyExistsException &a, AlreadyExistsException &b) {
+ using ::std::swap;
+ swap(a.message, b.message);
+ swap(a.__isset, b.__isset);
+}
+
+// Copy constructor / copy assignment: duplicate message and __isset flags.
+AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other986) : TException() {
+ message = other986.message;
+ __isset = other986.__isset;
+}
+AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other987) {
+ message = other987.message;
+ __isset = other987.__isset;
+ return *this;
+}
+// Human-readable rendering used by the inline operator<<.
+void AlreadyExistsException::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "AlreadyExistsException(";
+ out << "message=" << to_string(message);
+ out << ")";
+}
+
+// what(): formats lazily into thriftTExceptionMessageHolder_ so the
+// returned pointer remains valid; static fallback if formatting throws.
+const char* AlreadyExistsException::what() const throw() {
+ try {
+ std::stringstream ss;
+ ss << "TException - service has thrown: " << *this;
+ this->thriftTExceptionMessageHolder_ = ss.str();
+ return this->thriftTExceptionMessageHolder_.c_str();
+ } catch (const std::exception&) {
+ return "TException - service has thrown: AlreadyExistsException";
+ }
+}
+
+
+// ---- Thrift-generated implementation of InvalidPartitionException ----
+// Single optional field: message (field id 1, string). Do not edit by hand;
+// regenerate from the Thrift IDL instead.
+InvalidPartitionException::~InvalidPartitionException() throw() {
+}
+
+
+// Setter for the message field (leaves __isset unchanged).
+void InvalidPartitionException::__set_message(const std::string& val) {
+ this->message = val;
+}
+
+// Deserializes from the protocol; unknown/mistyped fields are skipped for
+// schema-evolution tolerance. Returns bytes consumed.
+uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->message);
+ this->__isset.message = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+// Serializes to the protocol (message written as field id 1).
+uint32_t InvalidPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("InvalidPartitionException");
+
+ xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->message);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+// Member-wise swap (ADL-friendly free function).
+void swap(InvalidPartitionException &a, InvalidPartitionException &b) {
+ using ::std::swap;
+ swap(a.message, b.message);
+ swap(a.__isset, b.__isset);
+}
+
+// Copy constructor / copy assignment: duplicate message and __isset flags.
+InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other988) : TException() {
+ message = other988.message;
+ __isset = other988.__isset;
+}
+InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other989) {
+ message = other989.message;
+ __isset = other989.__isset;
+ return *this;
+}
+// Human-readable rendering used by the inline operator<<.
+void InvalidPartitionException::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "InvalidPartitionException(";
+ out << "message=" << to_string(message);
+ out << ")";
+}
+
+// what(): lazy formatting cached in thriftTExceptionMessageHolder_;
+// static fallback keeps the no-throw contract.
+const char* InvalidPartitionException::what() const throw() {
+ try {
+ std::stringstream ss;
+ ss << "TException - service has thrown: " << *this;
+ this->thriftTExceptionMessageHolder_ = ss.str();
+ return this->thriftTExceptionMessageHolder_.c_str();
+ } catch (const std::exception&) {
+ return "TException - service has thrown: InvalidPartitionException";
+ }
+}
+
+
+// ---- Thrift-generated implementation of UnknownPartitionException ----
+// Single optional field: message (field id 1, string). Do not edit by hand;
+// regenerate from the Thrift IDL instead.
+UnknownPartitionException::~UnknownPartitionException() throw() {
+}
+
+
+// Setter for the message field (leaves __isset unchanged).
+void UnknownPartitionException::__set_message(const std::string& val) {
+ this->message = val;
+}
+
+// Deserializes from the protocol; unknown/mistyped fields are skipped for
+// schema-evolution tolerance. Returns bytes consumed.
+uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->message);
+ this->__isset.message = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+// Serializes to the protocol (message written as field id 1).
+uint32_t UnknownPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("UnknownPartitionException");
+
+ xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->message);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+// Member-wise swap (ADL-friendly free function).
+void swap(UnknownPartitionException &a, UnknownPartitionException &b) {
+ using ::std::swap;
+ swap(a.message, b.message);
+ swap(a.__isset, b.__isset);
+}
+
+// Copy constructor / copy assignment: duplicate message and __isset flags.
+UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other990) : TException() {
+ message = other990.message;
+ __isset = other990.__isset;
+}
+UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other991) {
+ message = other991.message;
+ __isset = other991.__isset;
+ return *this;
+}
+// Human-readable rendering used by the inline operator<<.
+// NOTE(review): the archive dropped the '+' diff prefix on the last three
+// lines of this function; the code itself is the standard generated form.
+void UnknownPartitionException::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "UnknownPartitionException(";
out << "message=" << to_string(message);
out << ")";
}
@@ -24322,13 +25354,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) {
swap(a.__isset, b.__isset);
}
-InvalidObjectException::InvalidObjectException(const InvalidObjectException& other968) : TException() {
- message = other968.message;
- __isset = other968.__isset;
+InvalidObjectException::InvalidObjectException(const InvalidObjectException& other992) : TException() {
+ message = other992.message;
+ __isset = other992.__isset;
}
-InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other969) {
- message = other969.message;
- __isset = other969.__isset;
+InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other993) {
+ message = other993.message;
+ __isset = other993.__isset;
return *this;
}
void InvalidObjectException::printTo(std::ostream& out) const {
@@ -24419,13 +25451,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) {
swap(a.__isset, b.__isset);
}
-NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other970) : TException() {
- message = other970.message;
- __isset = other970.__isset;
+NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other994) : TException() {
+ message = other994.message;
+ __isset = other994.__isset;
}
-NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other971) {
- message = other971.message;
- __isset = other971.__isset;
+NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other995) {
+ message = other995.message;
+ __isset = other995.__isset;
return *this;
}
void NoSuchObjectException::printTo(std::ostream& out) const {
@@ -24516,13 +25548,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) {
swap(a.__isset, b.__isset);
}
-IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other972) : TException() {
- message = other972.message;
- __isset = other972.__isset;
+IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other996) : TException() {
+ message = other996.message;
+ __isset = other996.__isset;
}
-IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other973) {
- message = other973.message;
- __isset = other973.__isset;
+IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other997) {
+ message = other997.message;
+ __isset = other997.__isset;
return *this;
}
void IndexAlreadyExistsException::printTo(std::ostream& out) const {
@@ -24613,13 +25645,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) {
swap(a.__isset, b.__isset);
}
-InvalidOperationException::InvalidOperationException(const InvalidOperationException& other974) : TException() {
- message = other974.message;
- __isset = other974.__isset;
+InvalidOperationException::InvalidOperationException(const InvalidOperationException& other998) : TException() {
+ message = other998.message;
+ __isset = other998.__isset;
}
-InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other975) {
- message = other975.message;
- __isset = other975.__isset;
+InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other999) {
+ message = other999.message;
+ __isset = other999.__isset;
return *this;
}
void InvalidOperationException::printTo(std::ostream& out) const {
@@ -24710,13 +25742,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) {
swap(a.__isset, b.__isset);
}
-ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other976) : TException() {
- message = other976.message;
- __isset = other976.__isset;
+ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1000) : TException() {
+ message = other1000.message;
+ __isset = other1000.__isset;
}
-ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other977) {
- message = other977.message;
- __isset = other977.__isset;
+ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1001) {
+ message = other1001.message;
+ __isset = other1001.__isset;
return *this;
}
void ConfigValSecurityException::printTo(std::ostream& out) const {
@@ -24807,13 +25839,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) {
swap(a.__isset, b.__isset);
}
-InvalidInputException::InvalidInputException(const InvalidInputException& other978) : TException() {
- message = other978.message;
- __isset = other978.__isset;
+InvalidInputException::InvalidInputException(const InvalidInputException& other1002) : TException() {
+ message = other1002.message;
+ __isset = other1002.__isset;
}
-InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other979) {
- message = other979.message;
- __isset = other979.__isset;
+InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1003) {
+ message = other1003.message;
+ __isset = other1003.__isset;
return *this;
}
void InvalidInputException::printTo(std::ostream& out) const {
@@ -24904,13 +25936,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) {
swap(a.__isset, b.__isset);
}
-NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other980) : TException() {
- message = other980.message;
- __isset = other980.__isset;
+NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1004) : TException() {
+ message = other1004.message;
+ __isset = other1004.__isset;
}
-NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other981) {
- message = other981.message;
- __isset = other981.__isset;
+NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1005) {
+ message = other1005.message;
+ __isset = other1005.__isset;
return *this;
}
void NoSuchTxnException::printTo(std::ostream& out) const {
@@ -25001,13 +26033,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) {
swap(a.__isset, b.__isset);
}
-TxnAbortedException::TxnAbortedException(const TxnAbortedException& other982) : TException() {
- message = other982.message;
- __isset = other982.__isset;
+TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1006) : TException() {
+ message = other1006.message;
+ __isset = other1006.__isset;
}
-TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other983) {
- message = other983.message;
- __isset = other983.__isset;
+TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1007) {
+ message = other1007.message;
+ __isset = other1007.__isset;
return *this;
}
void TxnAbortedException::printTo(std::ostream& out) const {
@@ -25098,13 +26130,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) {
swap(a.__isset, b.__isset);
}
-TxnOpenException::TxnOpenException(const TxnOpenException& other984) : TException() {
- message = other984.message;
- __isset = other984.__isset;
+TxnOpenException::TxnOpenException(const TxnOpenException& other1008) : TException() {
+ message = other1008.message;
+ __isset = other1008.__isset;
}
-TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other985) {
- message = other985.message;
- __isset = other985.__isset;
+TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1009) {
+ message = other1009.message;
+ __isset = other1009.__isset;
return *this;
}
void TxnOpenException::printTo(std::ostream& out) const {
@@ -25195,13 +26227,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) {
swap(a.__isset, b.__isset);
}
-NoSuchLockException::NoSuchLockException(const NoSuchLockException& other986) : TException() {
- message = other986.message;
- __isset = other986.__isset;
+NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1010) : TException() {
+ message = other1010.message;
+ __isset = other1010.__isset;
}
-NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other987) {
- message = other987.message;
- __isset = other987.__isset;
+NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1011) {
+ message = other1011.message;
+ __isset = other1011.__isset;
return *this;
}
void NoSuchLockException::printTo(std::ostream& out) const {
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index cdf0570..9cd7793 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -505,6 +505,30 @@ class WMGetTriggersForResourePlanRequest;
class WMGetTriggersForResourePlanResponse;
+class WMCreatePoolRequest;
+
+class WMCreatePoolResponse;
+
+class WMAlterPoolRequest;
+
+class WMAlterPoolResponse;
+
+class WMDropPoolRequest;
+
+class WMDropPoolResponse;
+
+class WMCreateOrUpdateMappingRequest;
+
+class WMCreateOrUpdateMappingResponse;
+
+class WMDropMappingRequest;
+
+class WMDropMappingResponse;
+
+class WMCreateOrDropTriggerToPoolMappingRequest;
+
+class WMCreateOrDropTriggerToPoolMappingResponse;
+
class MetaException;
class UnknownTableException;
@@ -8718,8 +8742,8 @@ inline std::ostream& operator<<(std::ostream& out, const WMTrigger& obj)
}
typedef struct _WMMapping__isset {
- _WMMapping__isset() : poolName(false), ordering(false) {}
- bool poolName :1;
+ _WMMapping__isset() : poolPath(false), ordering(false) {}
+ bool poolPath :1;
bool ordering :1;
} _WMMapping__isset;
@@ -8728,14 +8752,14 @@ class WMMapping {
WMMapping(const WMMapping&);
WMMapping& operator=(const WMMapping&);
- WMMapping() : resourcePlanName(), entityType(), entityName(), poolName(), ordering(0) {
+ WMMapping() : resourcePlanName(), entityType(), entityName(), poolPath(), ordering(0) {
}
virtual ~WMMapping() throw();
std::string resourcePlanName;
std::string entityType;
std::string entityName;
- std::string poolName;
+ std::string poolPath;
int32_t ordering;
_WMMapping__isset __isset;
@@ -8746,7 +8770,7 @@ class WMMapping {
void __set_entityName(const std::string& val);
- void __set_poolName(const std::string& val);
+ void __set_poolPath(const std::string& val);
void __set_ordering(const int32_t val);
@@ -8758,9 +8782,9 @@ class WMMapping {
return false;
if (!(entityName == rhs.entityName))
return false;
- if (__isset.poolName != rhs.__isset.poolName)
+ if (__isset.poolPath != rhs.__isset.poolPath)
return false;
- else if (__isset.poolName && !(poolName == rhs.poolName))
+ else if (__isset.poolPath && !(poolPath == rhs.poolPath))
return false;
if (__isset.ordering != rhs.__isset.ordering)
return false;
@@ -9896,6 +9920,552 @@ inline std::ostream& operator<<(std::ostream& out, const WMGetTriggersForResoure
return out;
}
+// Tracks which optional fields of WMCreatePoolRequest were explicitly set.
+typedef struct _WMCreatePoolRequest__isset {
+ _WMCreatePoolRequest__isset() : pool(false) {}
+ bool pool :1;
+} _WMCreatePoolRequest__isset;
+
+// Thrift-generated request for the workload-management create-pool API:
+// carries the WMPool definition to add to a resource plan. read/write/
+// printTo are implemented in the generated .cpp; operator== compares only
+// fields whose __isset flags agree.
+class WMCreatePoolRequest {
+ public:
+
+ WMCreatePoolRequest(const WMCreatePoolRequest&);
+ WMCreatePoolRequest& operator=(const WMCreatePoolRequest&);
+ WMCreatePoolRequest() {
+ }
+
+ virtual ~WMCreatePoolRequest() throw();
+ WMPool pool;
+
+ _WMCreatePoolRequest__isset __isset;
+
+ void __set_pool(const WMPool& val);
+
+ bool operator == (const WMCreatePoolRequest & rhs) const
+ {
+ if (__isset.pool != rhs.__isset.pool)
+ return false;
+ else if (__isset.pool && !(pool == rhs.pool))
+ return false;
+ return true;
+ }
+ bool operator != (const WMCreatePoolRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMCreatePoolRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMCreatePoolRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+// Thrift-generated empty acknowledgement for create-pool (no payload; all
+// instances compare equal).
+class WMCreatePoolResponse {
+ public:
+
+ WMCreatePoolResponse(const WMCreatePoolResponse&);
+ WMCreatePoolResponse& operator=(const WMCreatePoolResponse&);
+ WMCreatePoolResponse() {
+ }
+
+ virtual ~WMCreatePoolResponse() throw();
+
+ bool operator == (const WMCreatePoolResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const WMCreatePoolResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMCreatePoolResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMCreatePoolResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+// Tracks which optional fields of WMAlterPoolRequest were explicitly set.
+typedef struct _WMAlterPoolRequest__isset {
+ _WMAlterPoolRequest__isset() : pool(false), poolPath(false) {}
+ bool pool :1;
+ bool poolPath :1;
+} _WMAlterPoolRequest__isset;
+
+// Thrift-generated request for altering an existing WM pool: 'poolPath'
+// identifies the pool to change; 'pool' carries the new definition.
+class WMAlterPoolRequest {
+ public:
+
+ WMAlterPoolRequest(const WMAlterPoolRequest&);
+ WMAlterPoolRequest& operator=(const WMAlterPoolRequest&);
+ WMAlterPoolRequest() : poolPath() {
+ }
+
+ virtual ~WMAlterPoolRequest() throw();
+ WMPool pool;
+ std::string poolPath;
+
+ _WMAlterPoolRequest__isset __isset;
+
+ void __set_pool(const WMPool& val);
+
+ void __set_poolPath(const std::string& val);
+
+ bool operator == (const WMAlterPoolRequest & rhs) const
+ {
+ if (__isset.pool != rhs.__isset.pool)
+ return false;
+ else if (__isset.pool && !(pool == rhs.pool))
+ return false;
+ if (__isset.poolPath != rhs.__isset.poolPath)
+ return false;
+ else if (__isset.poolPath && !(poolPath == rhs.poolPath))
+ return false;
+ return true;
+ }
+ bool operator != (const WMAlterPoolRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMAlterPoolRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMAlterPoolRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+// Thrift-generated empty acknowledgement for alter-pool.
+class WMAlterPoolResponse {
+ public:
+
+ WMAlterPoolResponse(const WMAlterPoolResponse&);
+ WMAlterPoolResponse& operator=(const WMAlterPoolResponse&);
+ WMAlterPoolResponse() {
+ }
+
+ virtual ~WMAlterPoolResponse() throw();
+
+ bool operator == (const WMAlterPoolResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const WMAlterPoolResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMAlterPoolResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMAlterPoolResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+// Tracks which optional fields of WMDropPoolRequest were explicitly set.
+typedef struct _WMDropPoolRequest__isset {
+ _WMDropPoolRequest__isset() : resourcePlanName(false), poolPath(false) {}
+ bool resourcePlanName :1;
+ bool poolPath :1;
+} _WMDropPoolRequest__isset;
+
+// Thrift-generated request for dropping a WM pool, addressed by resource
+// plan name plus pool path.
+class WMDropPoolRequest {
+ public:
+
+ WMDropPoolRequest(const WMDropPoolRequest&);
+ WMDropPoolRequest& operator=(const WMDropPoolRequest&);
+ WMDropPoolRequest() : resourcePlanName(), poolPath() {
+ }
+
+ virtual ~WMDropPoolRequest() throw();
+ std::string resourcePlanName;
+ std::string poolPath;
+
+ _WMDropPoolRequest__isset __isset;
+
+ void __set_resourcePlanName(const std::string& val);
+
+ void __set_poolPath(const std::string& val);
+
+ bool operator == (const WMDropPoolRequest & rhs) const
+ {
+ if (__isset.resourcePlanName != rhs.__isset.resourcePlanName)
+ return false;
+ else if (__isset.resourcePlanName && !(resourcePlanName == rhs.resourcePlanName))
+ return false;
+ if (__isset.poolPath != rhs.__isset.poolPath)
+ return false;
+ else if (__isset.poolPath && !(poolPath == rhs.poolPath))
+ return false;
+ return true;
+ }
+ bool operator != (const WMDropPoolRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMDropPoolRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMDropPoolRequest &a, WMDropPoolRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMDropPoolRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+// Thrift-generated empty acknowledgement for drop-pool.
+class WMDropPoolResponse {
+ public:
+
+ WMDropPoolResponse(const WMDropPoolResponse&);
+ WMDropPoolResponse& operator=(const WMDropPoolResponse&);
+ WMDropPoolResponse() {
+ }
+
+ virtual ~WMDropPoolResponse() throw();
+
+ bool operator == (const WMDropPoolResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const WMDropPoolResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMDropPoolResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMDropPoolResponse &a, WMDropPoolResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMDropPoolResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+// Tracks which optional fields of WMCreateOrUpdateMappingRequest were set.
+typedef struct _WMCreateOrUpdateMappingRequest__isset {
+ _WMCreateOrUpdateMappingRequest__isset() : mapping(false), update(false) {}
+ bool mapping :1;
+ bool update :1;
+} _WMCreateOrUpdateMappingRequest__isset;
+
+// Thrift-generated request carrying a WMMapping (user/group -> pool);
+// 'update' selects update-existing vs. create semantics.
+class WMCreateOrUpdateMappingRequest {
+ public:
+
+ WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest&);
+ WMCreateOrUpdateMappingRequest& operator=(const WMCreateOrUpdateMappingRequest&);
+ WMCreateOrUpdateMappingRequest() : update(0) {
+ }
+
+ virtual ~WMCreateOrUpdateMappingRequest() throw();
+ WMMapping mapping;
+ bool update;
+
+ _WMCreateOrUpdateMappingRequest__isset __isset;
+
+ void __set_mapping(const WMMapping& val);
+
+ void __set_update(const bool val);
+
+ bool operator == (const WMCreateOrUpdateMappingRequest & rhs) const
+ {
+ if (__isset.mapping != rhs.__isset.mapping)
+ return false;
+ else if (__isset.mapping && !(mapping == rhs.mapping))
+ return false;
+ if (__isset.update != rhs.__isset.update)
+ return false;
+ else if (__isset.update && !(update == rhs.update))
+ return false;
+ return true;
+ }
+ bool operator != (const WMCreateOrUpdateMappingRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMCreateOrUpdateMappingRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMCreateOrUpdateMappingRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+// Thrift-generated empty acknowledgement for create-or-update-mapping.
+class WMCreateOrUpdateMappingResponse {
+ public:
+
+ WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse&);
+ WMCreateOrUpdateMappingResponse& operator=(const WMCreateOrUpdateMappingResponse&);
+ WMCreateOrUpdateMappingResponse() {
+ }
+
+ virtual ~WMCreateOrUpdateMappingResponse() throw();
+
+ bool operator == (const WMCreateOrUpdateMappingResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const WMCreateOrUpdateMappingResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMCreateOrUpdateMappingResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMCreateOrUpdateMappingResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+// Tracks which optional fields of WMDropMappingRequest were explicitly set.
+typedef struct _WMDropMappingRequest__isset {
+ _WMDropMappingRequest__isset() : mapping(false) {}
+ bool mapping :1;
+} _WMDropMappingRequest__isset;
+
+// Thrift-generated request for removing a WMMapping.
+class WMDropMappingRequest {
+ public:
+
+ WMDropMappingRequest(const WMDropMappingRequest&);
+ WMDropMappingRequest& operator=(const WMDropMappingRequest&);
+ WMDropMappingRequest() {
+ }
+
+ virtual ~WMDropMappingRequest() throw();
+ WMMapping mapping;
+
+ _WMDropMappingRequest__isset __isset;
+
+ void __set_mapping(const WMMapping& val);
+
+ bool operator == (const WMDropMappingRequest & rhs) const
+ {
+ if (__isset.mapping != rhs.__isset.mapping)
+ return false;
+ else if (__isset.mapping && !(mapping == rhs.mapping))
+ return false;
+ return true;
+ }
+ bool operator != (const WMDropMappingRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMDropMappingRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMDropMappingRequest &a, WMDropMappingRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMDropMappingRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+// Thrift-generated empty acknowledgement for drop-mapping.
+class WMDropMappingResponse {
+ public:
+
+ WMDropMappingResponse(const WMDropMappingResponse&);
+ WMDropMappingResponse& operator=(const WMDropMappingResponse&);
+ WMDropMappingResponse() {
+ }
+
+ virtual ~WMDropMappingResponse() throw();
+
+ bool operator == (const WMDropMappingResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const WMDropMappingResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMDropMappingResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMDropMappingResponse &a, WMDropMappingResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMDropMappingResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+// Tracks which optional fields of WMCreateOrDropTriggerToPoolMappingRequest
+// were explicitly set.
+typedef struct _WMCreateOrDropTriggerToPoolMappingRequest__isset {
+ _WMCreateOrDropTriggerToPoolMappingRequest__isset() : resourcePlanName(false), triggerName(false), poolPath(false), drop(false) {}
+ bool resourcePlanName :1;
+ bool triggerName :1;
+ bool poolPath :1;
+ bool drop :1;
+} _WMCreateOrDropTriggerToPoolMappingRequest__isset;
+
+// Thrift-generated request associating (or, when 'drop' is true,
+// dissociating) a trigger with a pool inside a resource plan.
+class WMCreateOrDropTriggerToPoolMappingRequest {
+ public:
+
+ WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest&);
+ WMCreateOrDropTriggerToPoolMappingRequest& operator=(const WMCreateOrDropTriggerToPoolMappingRequest&);
+ WMCreateOrDropTriggerToPoolMappingRequest() : resourcePlanName(), triggerName(), poolPath(), drop(0) {
+ }
+
+ virtual ~WMCreateOrDropTriggerToPoolMappingRequest() throw();
+ std::string resourcePlanName;
+ std::string triggerName;
+ std::string poolPath;
+ bool drop;
+
+ _WMCreateOrDropTriggerToPoolMappingRequest__isset __isset;
+
+ void __set_resourcePlanName(const std::string& val);
+
+ void __set_triggerName(const std::string& val);
+
+ void __set_poolPath(const std::string& val);
+
+ void __set_drop(const bool val);
+
+ bool operator == (const WMCreateOrDropTriggerToPoolMappingRequest & rhs) const
+ {
+ if (__isset.resourcePlanName != rhs.__isset.resourcePlanName)
+ return false;
+ else if (__isset.resourcePlanName && !(resourcePlanName == rhs.resourcePlanName))
+ return false;
+ if (__isset.triggerName != rhs.__isset.triggerName)
+ return false;
+ else if (__isset.triggerName && !(triggerName == rhs.triggerName))
+ return false;
+ if (__isset.poolPath != rhs.__isset.poolPath)
+ return false;
+ else if (__isset.poolPath && !(poolPath == rhs.poolPath))
+ return false;
+ if (__isset.drop != rhs.__isset.drop)
+ return false;
+ else if (__isset.drop && !(drop == rhs.drop))
+ return false;
+ return true;
+ }
+ bool operator != (const WMCreateOrDropTriggerToPoolMappingRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMCreateOrDropTriggerToPoolMappingRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToPoolMappingRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerToPoolMappingRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+// Thrift-generated empty acknowledgement for trigger-to-pool mapping.
+class WMCreateOrDropTriggerToPoolMappingResponse {
+ public:
+
+ WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse&);
+ WMCreateOrDropTriggerToPoolMappingResponse& operator=(const WMCreateOrDropTriggerToPoolMappingResponse&);
+ WMCreateOrDropTriggerToPoolMappingResponse() {
+ }
+
+ virtual ~WMCreateOrDropTriggerToPoolMappingResponse() throw();
+
+ bool operator == (const WMCreateOrDropTriggerToPoolMappingResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const WMCreateOrDropTriggerToPoolMappingResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const WMCreateOrDropTriggerToPoolMappingResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerToPoolMappingResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerToPoolMappingResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
typedef struct _MetaException__isset {
_MetaException__isset() : message(false) {}
bool message :1;
[11/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 58c5af5..ecf2509 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size988;
- ::apache::thrift::protocol::TType _etype991;
- xfer += iprot->readListBegin(_etype991, _size988);
- this->success.resize(_size988);
- uint32_t _i992;
- for (_i992 = 0; _i992 < _size988; ++_i992)
+ uint32_t _size1012;
+ ::apache::thrift::protocol::TType _etype1015;
+ xfer += iprot->readListBegin(_etype1015, _size1012);
+ this->success.resize(_size1012);
+ uint32_t _i1016;
+ for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
{
- xfer += iprot->readString(this->success[_i992]);
+ xfer += iprot->readString(this->success[_i1016]);
}
xfer += iprot->readListEnd();
}
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter993;
- for (_iter993 = this->success.begin(); _iter993 != this->success.end(); ++_iter993)
+ std::vector<std::string> ::const_iterator _iter1017;
+ for (_iter1017 = this->success.begin(); _iter1017 != this->success.end(); ++_iter1017)
{
- xfer += oprot->writeString((*_iter993));
+ xfer += oprot->writeString((*_iter1017));
}
xfer += oprot->writeListEnd();
}
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size994;
- ::apache::thrift::protocol::TType _etype997;
- xfer += iprot->readListBegin(_etype997, _size994);
- (*(this->success)).resize(_size994);
- uint32_t _i998;
- for (_i998 = 0; _i998 < _size994; ++_i998)
+ uint32_t _size1018;
+ ::apache::thrift::protocol::TType _etype1021;
+ xfer += iprot->readListBegin(_etype1021, _size1018);
+ (*(this->success)).resize(_size1018);
+ uint32_t _i1022;
+ for (_i1022 = 0; _i1022 < _size1018; ++_i1022)
{
- xfer += iprot->readString((*(this->success))[_i998]);
+ xfer += iprot->readString((*(this->success))[_i1022]);
}
xfer += iprot->readListEnd();
}
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size999;
- ::apache::thrift::protocol::TType _etype1002;
- xfer += iprot->readListBegin(_etype1002, _size999);
- this->success.resize(_size999);
- uint32_t _i1003;
- for (_i1003 = 0; _i1003 < _size999; ++_i1003)
+ uint32_t _size1023;
+ ::apache::thrift::protocol::TType _etype1026;
+ xfer += iprot->readListBegin(_etype1026, _size1023);
+ this->success.resize(_size1023);
+ uint32_t _i1027;
+ for (_i1027 = 0; _i1027 < _size1023; ++_i1027)
{
- xfer += iprot->readString(this->success[_i1003]);
+ xfer += iprot->readString(this->success[_i1027]);
}
xfer += iprot->readListEnd();
}
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1004;
- for (_iter1004 = this->success.begin(); _iter1004 != this->success.end(); ++_iter1004)
+ std::vector<std::string> ::const_iterator _iter1028;
+ for (_iter1028 = this->success.begin(); _iter1028 != this->success.end(); ++_iter1028)
{
- xfer += oprot->writeString((*_iter1004));
+ xfer += oprot->writeString((*_iter1028));
}
xfer += oprot->writeListEnd();
}
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1005;
- ::apache::thrift::protocol::TType _etype1008;
- xfer += iprot->readListBegin(_etype1008, _size1005);
- (*(this->success)).resize(_size1005);
- uint32_t _i1009;
- for (_i1009 = 0; _i1009 < _size1005; ++_i1009)
+ uint32_t _size1029;
+ ::apache::thrift::protocol::TType _etype1032;
+ xfer += iprot->readListBegin(_etype1032, _size1029);
+ (*(this->success)).resize(_size1029);
+ uint32_t _i1033;
+ for (_i1033 = 0; _i1033 < _size1029; ++_i1033)
{
- xfer += iprot->readString((*(this->success))[_i1009]);
+ xfer += iprot->readString((*(this->success))[_i1033]);
}
xfer += iprot->readListEnd();
}
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1010;
- ::apache::thrift::protocol::TType _ktype1011;
- ::apache::thrift::protocol::TType _vtype1012;
- xfer += iprot->readMapBegin(_ktype1011, _vtype1012, _size1010);
- uint32_t _i1014;
- for (_i1014 = 0; _i1014 < _size1010; ++_i1014)
+ uint32_t _size1034;
+ ::apache::thrift::protocol::TType _ktype1035;
+ ::apache::thrift::protocol::TType _vtype1036;
+ xfer += iprot->readMapBegin(_ktype1035, _vtype1036, _size1034);
+ uint32_t _i1038;
+ for (_i1038 = 0; _i1038 < _size1034; ++_i1038)
{
- std::string _key1015;
- xfer += iprot->readString(_key1015);
- Type& _val1016 = this->success[_key1015];
- xfer += _val1016.read(iprot);
+ std::string _key1039;
+ xfer += iprot->readString(_key1039);
+ Type& _val1040 = this->success[_key1039];
+ xfer += _val1040.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Type> ::const_iterator _iter1017;
- for (_iter1017 = this->success.begin(); _iter1017 != this->success.end(); ++_iter1017)
+ std::map<std::string, Type> ::const_iterator _iter1041;
+ for (_iter1041 = this->success.begin(); _iter1041 != this->success.end(); ++_iter1041)
{
- xfer += oprot->writeString(_iter1017->first);
- xfer += _iter1017->second.write(oprot);
+ xfer += oprot->writeString(_iter1041->first);
+ xfer += _iter1041->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1018;
- ::apache::thrift::protocol::TType _ktype1019;
- ::apache::thrift::protocol::TType _vtype1020;
- xfer += iprot->readMapBegin(_ktype1019, _vtype1020, _size1018);
- uint32_t _i1022;
- for (_i1022 = 0; _i1022 < _size1018; ++_i1022)
+ uint32_t _size1042;
+ ::apache::thrift::protocol::TType _ktype1043;
+ ::apache::thrift::protocol::TType _vtype1044;
+ xfer += iprot->readMapBegin(_ktype1043, _vtype1044, _size1042);
+ uint32_t _i1046;
+ for (_i1046 = 0; _i1046 < _size1042; ++_i1046)
{
- std::string _key1023;
- xfer += iprot->readString(_key1023);
- Type& _val1024 = (*(this->success))[_key1023];
- xfer += _val1024.read(iprot);
+ std::string _key1047;
+ xfer += iprot->readString(_key1047);
+ Type& _val1048 = (*(this->success))[_key1047];
+ xfer += _val1048.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1025;
- ::apache::thrift::protocol::TType _etype1028;
- xfer += iprot->readListBegin(_etype1028, _size1025);
- this->success.resize(_size1025);
- uint32_t _i1029;
- for (_i1029 = 0; _i1029 < _size1025; ++_i1029)
+ uint32_t _size1049;
+ ::apache::thrift::protocol::TType _etype1052;
+ xfer += iprot->readListBegin(_etype1052, _size1049);
+ this->success.resize(_size1049);
+ uint32_t _i1053;
+ for (_i1053 = 0; _i1053 < _size1049; ++_i1053)
{
- xfer += this->success[_i1029].read(iprot);
+ xfer += this->success[_i1053].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1030;
- for (_iter1030 = this->success.begin(); _iter1030 != this->success.end(); ++_iter1030)
+ std::vector<FieldSchema> ::const_iterator _iter1054;
+ for (_iter1054 = this->success.begin(); _iter1054 != this->success.end(); ++_iter1054)
{
- xfer += (*_iter1030).write(oprot);
+ xfer += (*_iter1054).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1031;
- ::apache::thrift::protocol::TType _etype1034;
- xfer += iprot->readListBegin(_etype1034, _size1031);
- (*(this->success)).resize(_size1031);
- uint32_t _i1035;
- for (_i1035 = 0; _i1035 < _size1031; ++_i1035)
+ uint32_t _size1055;
+ ::apache::thrift::protocol::TType _etype1058;
+ xfer += iprot->readListBegin(_etype1058, _size1055);
+ (*(this->success)).resize(_size1055);
+ uint32_t _i1059;
+ for (_i1059 = 0; _i1059 < _size1055; ++_i1059)
{
- xfer += (*(this->success))[_i1035].read(iprot);
+ xfer += (*(this->success))[_i1059].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1036;
- ::apache::thrift::protocol::TType _etype1039;
- xfer += iprot->readListBegin(_etype1039, _size1036);
- this->success.resize(_size1036);
- uint32_t _i1040;
- for (_i1040 = 0; _i1040 < _size1036; ++_i1040)
+ uint32_t _size1060;
+ ::apache::thrift::protocol::TType _etype1063;
+ xfer += iprot->readListBegin(_etype1063, _size1060);
+ this->success.resize(_size1060);
+ uint32_t _i1064;
+ for (_i1064 = 0; _i1064 < _size1060; ++_i1064)
{
- xfer += this->success[_i1040].read(iprot);
+ xfer += this->success[_i1064].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1041;
- for (_iter1041 = this->success.begin(); _iter1041 != this->success.end(); ++_iter1041)
+ std::vector<FieldSchema> ::const_iterator _iter1065;
+ for (_iter1065 = this->success.begin(); _iter1065 != this->success.end(); ++_iter1065)
{
- xfer += (*_iter1041).write(oprot);
+ xfer += (*_iter1065).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1042;
- ::apache::thrift::protocol::TType _etype1045;
- xfer += iprot->readListBegin(_etype1045, _size1042);
- (*(this->success)).resize(_size1042);
- uint32_t _i1046;
- for (_i1046 = 0; _i1046 < _size1042; ++_i1046)
+ uint32_t _size1066;
+ ::apache::thrift::protocol::TType _etype1069;
+ xfer += iprot->readListBegin(_etype1069, _size1066);
+ (*(this->success)).resize(_size1066);
+ uint32_t _i1070;
+ for (_i1070 = 0; _i1070 < _size1066; ++_i1070)
{
- xfer += (*(this->success))[_i1046].read(iprot);
+ xfer += (*(this->success))[_i1070].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1047;
- ::apache::thrift::protocol::TType _etype1050;
- xfer += iprot->readListBegin(_etype1050, _size1047);
- this->success.resize(_size1047);
- uint32_t _i1051;
- for (_i1051 = 0; _i1051 < _size1047; ++_i1051)
+ uint32_t _size1071;
+ ::apache::thrift::protocol::TType _etype1074;
+ xfer += iprot->readListBegin(_etype1074, _size1071);
+ this->success.resize(_size1071);
+ uint32_t _i1075;
+ for (_i1075 = 0; _i1075 < _size1071; ++_i1075)
{
- xfer += this->success[_i1051].read(iprot);
+ xfer += this->success[_i1075].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1052;
- for (_iter1052 = this->success.begin(); _iter1052 != this->success.end(); ++_iter1052)
+ std::vector<FieldSchema> ::const_iterator _iter1076;
+ for (_iter1076 = this->success.begin(); _iter1076 != this->success.end(); ++_iter1076)
{
- xfer += (*_iter1052).write(oprot);
+ xfer += (*_iter1076).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1053;
- ::apache::thrift::protocol::TType _etype1056;
- xfer += iprot->readListBegin(_etype1056, _size1053);
- (*(this->success)).resize(_size1053);
- uint32_t _i1057;
- for (_i1057 = 0; _i1057 < _size1053; ++_i1057)
+ uint32_t _size1077;
+ ::apache::thrift::protocol::TType _etype1080;
+ xfer += iprot->readListBegin(_etype1080, _size1077);
+ (*(this->success)).resize(_size1077);
+ uint32_t _i1081;
+ for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
{
- xfer += (*(this->success))[_i1057].read(iprot);
+ xfer += (*(this->success))[_i1081].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1058;
- ::apache::thrift::protocol::TType _etype1061;
- xfer += iprot->readListBegin(_etype1061, _size1058);
- this->success.resize(_size1058);
- uint32_t _i1062;
- for (_i1062 = 0; _i1062 < _size1058; ++_i1062)
+ uint32_t _size1082;
+ ::apache::thrift::protocol::TType _etype1085;
+ xfer += iprot->readListBegin(_etype1085, _size1082);
+ this->success.resize(_size1082);
+ uint32_t _i1086;
+ for (_i1086 = 0; _i1086 < _size1082; ++_i1086)
{
- xfer += this->success[_i1062].read(iprot);
+ xfer += this->success[_i1086].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1063;
- for (_iter1063 = this->success.begin(); _iter1063 != this->success.end(); ++_iter1063)
+ std::vector<FieldSchema> ::const_iterator _iter1087;
+ for (_iter1087 = this->success.begin(); _iter1087 != this->success.end(); ++_iter1087)
{
- xfer += (*_iter1063).write(oprot);
+ xfer += (*_iter1087).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1064;
- ::apache::thrift::protocol::TType _etype1067;
- xfer += iprot->readListBegin(_etype1067, _size1064);
- (*(this->success)).resize(_size1064);
- uint32_t _i1068;
- for (_i1068 = 0; _i1068 < _size1064; ++_i1068)
+ uint32_t _size1088;
+ ::apache::thrift::protocol::TType _etype1091;
+ xfer += iprot->readListBegin(_etype1091, _size1088);
+ (*(this->success)).resize(_size1088);
+ uint32_t _i1092;
+ for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
{
- xfer += (*(this->success))[_i1068].read(iprot);
+ xfer += (*(this->success))[_i1092].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size1069;
- ::apache::thrift::protocol::TType _etype1072;
- xfer += iprot->readListBegin(_etype1072, _size1069);
- this->primaryKeys.resize(_size1069);
- uint32_t _i1073;
- for (_i1073 = 0; _i1073 < _size1069; ++_i1073)
+ uint32_t _size1093;
+ ::apache::thrift::protocol::TType _etype1096;
+ xfer += iprot->readListBegin(_etype1096, _size1093);
+ this->primaryKeys.resize(_size1093);
+ uint32_t _i1097;
+ for (_i1097 = 0; _i1097 < _size1093; ++_i1097)
{
- xfer += this->primaryKeys[_i1073].read(iprot);
+ xfer += this->primaryKeys[_i1097].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size1074;
- ::apache::thrift::protocol::TType _etype1077;
- xfer += iprot->readListBegin(_etype1077, _size1074);
- this->foreignKeys.resize(_size1074);
- uint32_t _i1078;
- for (_i1078 = 0; _i1078 < _size1074; ++_i1078)
+ uint32_t _size1098;
+ ::apache::thrift::protocol::TType _etype1101;
+ xfer += iprot->readListBegin(_etype1101, _size1098);
+ this->foreignKeys.resize(_size1098);
+ uint32_t _i1102;
+ for (_i1102 = 0; _i1102 < _size1098; ++_i1102)
{
- xfer += this->foreignKeys[_i1078].read(iprot);
+ xfer += this->foreignKeys[_i1102].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size1079;
- ::apache::thrift::protocol::TType _etype1082;
- xfer += iprot->readListBegin(_etype1082, _size1079);
- this->uniqueConstraints.resize(_size1079);
- uint32_t _i1083;
- for (_i1083 = 0; _i1083 < _size1079; ++_i1083)
+ uint32_t _size1103;
+ ::apache::thrift::protocol::TType _etype1106;
+ xfer += iprot->readListBegin(_etype1106, _size1103);
+ this->uniqueConstraints.resize(_size1103);
+ uint32_t _i1107;
+ for (_i1107 = 0; _i1107 < _size1103; ++_i1107)
{
- xfer += this->uniqueConstraints[_i1083].read(iprot);
+ xfer += this->uniqueConstraints[_i1107].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size1084;
- ::apache::thrift::protocol::TType _etype1087;
- xfer += iprot->readListBegin(_etype1087, _size1084);
- this->notNullConstraints.resize(_size1084);
- uint32_t _i1088;
- for (_i1088 = 0; _i1088 < _size1084; ++_i1088)
+ uint32_t _size1108;
+ ::apache::thrift::protocol::TType _etype1111;
+ xfer += iprot->readListBegin(_etype1111, _size1108);
+ this->notNullConstraints.resize(_size1108);
+ uint32_t _i1112;
+ for (_i1112 = 0; _i1112 < _size1108; ++_i1112)
{
- xfer += this->notNullConstraints[_i1088].read(iprot);
+ xfer += this->notNullConstraints[_i1112].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1089;
- for (_iter1089 = this->primaryKeys.begin(); _iter1089 != this->primaryKeys.end(); ++_iter1089)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1113;
+ for (_iter1113 = this->primaryKeys.begin(); _iter1113 != this->primaryKeys.end(); ++_iter1113)
{
- xfer += (*_iter1089).write(oprot);
+ xfer += (*_iter1113).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1090;
- for (_iter1090 = this->foreignKeys.begin(); _iter1090 != this->foreignKeys.end(); ++_iter1090)
+ std::vector<SQLForeignKey> ::const_iterator _iter1114;
+ for (_iter1114 = this->foreignKeys.begin(); _iter1114 != this->foreignKeys.end(); ++_iter1114)
{
- xfer += (*_iter1090).write(oprot);
+ xfer += (*_iter1114).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1091;
- for (_iter1091 = this->uniqueConstraints.begin(); _iter1091 != this->uniqueConstraints.end(); ++_iter1091)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1115;
+ for (_iter1115 = this->uniqueConstraints.begin(); _iter1115 != this->uniqueConstraints.end(); ++_iter1115)
{
- xfer += (*_iter1091).write(oprot);
+ xfer += (*_iter1115).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1092;
- for (_iter1092 = this->notNullConstraints.begin(); _iter1092 != this->notNullConstraints.end(); ++_iter1092)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1116;
+ for (_iter1116 = this->notNullConstraints.begin(); _iter1116 != this->notNullConstraints.end(); ++_iter1116)
{
- xfer += (*_iter1092).write(oprot);
+ xfer += (*_iter1116).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1093;
- for (_iter1093 = (*(this->primaryKeys)).begin(); _iter1093 != (*(this->primaryKeys)).end(); ++_iter1093)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1117;
+ for (_iter1117 = (*(this->primaryKeys)).begin(); _iter1117 != (*(this->primaryKeys)).end(); ++_iter1117)
{
- xfer += (*_iter1093).write(oprot);
+ xfer += (*_iter1117).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1094;
- for (_iter1094 = (*(this->foreignKeys)).begin(); _iter1094 != (*(this->foreignKeys)).end(); ++_iter1094)
+ std::vector<SQLForeignKey> ::const_iterator _iter1118;
+ for (_iter1118 = (*(this->foreignKeys)).begin(); _iter1118 != (*(this->foreignKeys)).end(); ++_iter1118)
{
- xfer += (*_iter1094).write(oprot);
+ xfer += (*_iter1118).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1095;
- for (_iter1095 = (*(this->uniqueConstraints)).begin(); _iter1095 != (*(this->uniqueConstraints)).end(); ++_iter1095)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1119;
+ for (_iter1119 = (*(this->uniqueConstraints)).begin(); _iter1119 != (*(this->uniqueConstraints)).end(); ++_iter1119)
{
- xfer += (*_iter1095).write(oprot);
+ xfer += (*_iter1119).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4721,10 +4721,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1096;
- for (_iter1096 = (*(this->notNullConstraints)).begin(); _iter1096 != (*(this->notNullConstraints)).end(); ++_iter1096)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1120;
+ for (_iter1120 = (*(this->notNullConstraints)).begin(); _iter1120 != (*(this->notNullConstraints)).end(); ++_iter1120)
{
- xfer += (*_iter1096).write(oprot);
+ xfer += (*_iter1120).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partNames.clear();
- uint32_t _size1097;
- ::apache::thrift::protocol::TType _etype1100;
- xfer += iprot->readListBegin(_etype1100, _size1097);
- this->partNames.resize(_size1097);
- uint32_t _i1101;
- for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
+ uint32_t _size1121;
+ ::apache::thrift::protocol::TType _etype1124;
+ xfer += iprot->readListBegin(_etype1124, _size1121);
+ this->partNames.resize(_size1121);
+ uint32_t _i1125;
+ for (_i1125 = 0; _i1125 < _size1121; ++_i1125)
{
- xfer += iprot->readString(this->partNames[_i1101]);
+ xfer += iprot->readString(this->partNames[_i1125]);
}
xfer += iprot->readListEnd();
}
@@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
- std::vector<std::string> ::const_iterator _iter1102;
- for (_iter1102 = this->partNames.begin(); _iter1102 != this->partNames.end(); ++_iter1102)
+ std::vector<std::string> ::const_iterator _iter1126;
+ for (_iter1126 = this->partNames.begin(); _iter1126 != this->partNames.end(); ++_iter1126)
{
- xfer += oprot->writeString((*_iter1102));
+ xfer += oprot->writeString((*_iter1126));
}
xfer += oprot->writeListEnd();
}
@@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
- std::vector<std::string> ::const_iterator _iter1103;
- for (_iter1103 = (*(this->partNames)).begin(); _iter1103 != (*(this->partNames)).end(); ++_iter1103)
+ std::vector<std::string> ::const_iterator _iter1127;
+ for (_iter1127 = (*(this->partNames)).begin(); _iter1127 != (*(this->partNames)).end(); ++_iter1127)
{
- xfer += oprot->writeString((*_iter1103));
+ xfer += oprot->writeString((*_iter1127));
}
xfer += oprot->writeListEnd();
}
@@ -6804,14 +6804,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1104;
- ::apache::thrift::protocol::TType _etype1107;
- xfer += iprot->readListBegin(_etype1107, _size1104);
- this->success.resize(_size1104);
- uint32_t _i1108;
- for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
+ uint32_t _size1128;
+ ::apache::thrift::protocol::TType _etype1131;
+ xfer += iprot->readListBegin(_etype1131, _size1128);
+ this->success.resize(_size1128);
+ uint32_t _i1132;
+ for (_i1132 = 0; _i1132 < _size1128; ++_i1132)
{
- xfer += iprot->readString(this->success[_i1108]);
+ xfer += iprot->readString(this->success[_i1132]);
}
xfer += iprot->readListEnd();
}
@@ -6850,10 +6850,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1109;
- for (_iter1109 = this->success.begin(); _iter1109 != this->success.end(); ++_iter1109)
+ std::vector<std::string> ::const_iterator _iter1133;
+ for (_iter1133 = this->success.begin(); _iter1133 != this->success.end(); ++_iter1133)
{
- xfer += oprot->writeString((*_iter1109));
+ xfer += oprot->writeString((*_iter1133));
}
xfer += oprot->writeListEnd();
}
@@ -6898,14 +6898,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1110;
- ::apache::thrift::protocol::TType _etype1113;
- xfer += iprot->readListBegin(_etype1113, _size1110);
- (*(this->success)).resize(_size1110);
- uint32_t _i1114;
- for (_i1114 = 0; _i1114 < _size1110; ++_i1114)
+ uint32_t _size1134;
+ ::apache::thrift::protocol::TType _etype1137;
+ xfer += iprot->readListBegin(_etype1137, _size1134);
+ (*(this->success)).resize(_size1134);
+ uint32_t _i1138;
+ for (_i1138 = 0; _i1138 < _size1134; ++_i1138)
{
- xfer += iprot->readString((*(this->success))[_i1114]);
+ xfer += iprot->readString((*(this->success))[_i1138]);
}
xfer += iprot->readListEnd();
}
@@ -7075,407 +7075,407 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1115;
- ::apache::thrift::protocol::TType _etype1118;
- xfer += iprot->readListBegin(_etype1118, _size1115);
- this->success.resize(_size1115);
- uint32_t _i1119;
- for (_i1119 = 0; _i1119 < _size1115; ++_i1119)
- {
- xfer += iprot->readString(this->success[_i1119]);
- }
- xfer += iprot->readListEnd();
- }
- this->__isset.success = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRUCT) {
- xfer += this->o1.read(iprot);
- this->__isset.o1 = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
- xfer += iprot->readFieldEnd();
- }
-
- xfer += iprot->readStructEnd();
-
- return xfer;
-}
-
-uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
-
- uint32_t xfer = 0;
-
- xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_by_type_result");
-
- if (this->__isset.success) {
- xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
- {
- xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1120;
- for (_iter1120 = this->success.begin(); _iter1120 != this->success.end(); ++_iter1120)
- {
- xfer += oprot->writeString((*_iter1120));
- }
- xfer += oprot->writeListEnd();
- }
- xfer += oprot->writeFieldEnd();
- } else if (this->__isset.o1) {
- xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
- xfer += this->o1.write(oprot);
- xfer += oprot->writeFieldEnd();
- }
- xfer += oprot->writeFieldStop();
- xfer += oprot->writeStructEnd();
- return xfer;
-}
-
-
-ThriftHiveMetastore_get_tables_by_type_presult::~ThriftHiveMetastore_get_tables_by_type_presult() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
- apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
- uint32_t xfer = 0;
- std::string fname;
- ::apache::thrift::protocol::TType ftype;
- int16_t fid;
-
- xfer += iprot->readStructBegin(fname);
-
- using ::apache::thrift::protocol::TProtocolException;
-
-
- while (true)
- {
- xfer += iprot->readFieldBegin(fname, ftype, fid);
- if (ftype == ::apache::thrift::protocol::T_STOP) {
- break;
- }
- switch (fid)
- {
- case 0:
- if (ftype == ::apache::thrift::protocol::T_LIST) {
- {
- (*(this->success)).clear();
- uint32_t _size1121;
- ::apache::thrift::protocol::TType _etype1124;
- xfer += iprot->readListBegin(_etype1124, _size1121);
- (*(this->success)).resize(_size1121);
- uint32_t _i1125;
- for (_i1125 = 0; _i1125 < _size1121; ++_i1125)
- {
- xfer += iprot->readString((*(this->success))[_i1125]);
- }
- xfer += iprot->readListEnd();
- }
- this->__isset.success = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRUCT) {
- xfer += this->o1.read(iprot);
- this->__isset.o1 = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
- xfer += iprot->readFieldEnd();
- }
-
- xfer += iprot->readStructEnd();
-
- return xfer;
-}
-
-
-ThriftHiveMetastore_get_table_meta_args::~ThriftHiveMetastore_get_table_meta_args() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protocol::TProtocol* iprot) {
-
- apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
- uint32_t xfer = 0;
- std::string fname;
- ::apache::thrift::protocol::TType ftype;
- int16_t fid;
-
- xfer += iprot->readStructBegin(fname);
-
- using ::apache::thrift::protocol::TProtocolException;
-
-
- while (true)
- {
- xfer += iprot->readFieldBegin(fname, ftype, fid);
- if (ftype == ::apache::thrift::protocol::T_STOP) {
- break;
- }
- switch (fid)
- {
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->db_patterns);
- this->__isset.db_patterns = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 2:
- if (ftype == ::apache::thrift::protocol::T_STRING) {
- xfer += iprot->readString(this->tbl_patterns);
- this->__isset.tbl_patterns = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 3:
- if (ftype == ::apache::thrift::protocol::T_LIST) {
- {
- this->tbl_types.clear();
- uint32_t _size1126;
- ::apache::thrift::protocol::TType _etype1129;
- xfer += iprot->readListBegin(_etype1129, _size1126);
- this->tbl_types.resize(_size1126);
- uint32_t _i1130;
- for (_i1130 = 0; _i1130 < _size1126; ++_i1130)
- {
- xfer += iprot->readString(this->tbl_types[_i1130]);
- }
- xfer += iprot->readListEnd();
- }
- this->__isset.tbl_types = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
- xfer += iprot->readFieldEnd();
- }
-
- xfer += iprot->readStructEnd();
-
- return xfer;
-}
-
-uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
- uint32_t xfer = 0;
- apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_args");
-
- xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString(this->db_patterns);
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2);
- xfer += oprot->writeString(this->tbl_patterns);
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
- {
- xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
- std::vector<std::string> ::const_iterator _iter1131;
- for (_iter1131 = this->tbl_types.begin(); _iter1131 != this->tbl_types.end(); ++_iter1131)
- {
- xfer += oprot->writeString((*_iter1131));
- }
- xfer += oprot->writeListEnd();
- }
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldStop();
- xfer += oprot->writeStructEnd();
- return xfer;
-}
-
-
-ThriftHiveMetastore_get_table_meta_pargs::~ThriftHiveMetastore_get_table_meta_pargs() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
- uint32_t xfer = 0;
- apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
- xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_pargs");
-
- xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1);
- xfer += oprot->writeString((*(this->db_patterns)));
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2);
- xfer += oprot->writeString((*(this->tbl_patterns)));
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
- {
- xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
- std::vector<std::string> ::const_iterator _iter1132;
- for (_iter1132 = (*(this->tbl_types)).begin(); _iter1132 != (*(this->tbl_types)).end(); ++_iter1132)
- {
- xfer += oprot->writeString((*_iter1132));
- }
- xfer += oprot->writeListEnd();
- }
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldStop();
- xfer += oprot->writeStructEnd();
- return xfer;
-}
-
-
-ThriftHiveMetastore_get_table_meta_result::~ThriftHiveMetastore_get_table_meta_result() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::protocol::TProtocol* iprot) {
-
- apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
- uint32_t xfer = 0;
- std::string fname;
- ::apache::thrift::protocol::TType ftype;
- int16_t fid;
-
- xfer += iprot->readStructBegin(fname);
-
- using ::apache::thrift::protocol::TProtocolException;
-
-
- while (true)
- {
- xfer += iprot->readFieldBegin(fname, ftype, fid);
- if (ftype == ::apache::thrift::protocol::T_STOP) {
- break;
- }
- switch (fid)
- {
- case 0:
- if (ftype == ::apache::thrift::protocol::T_LIST) {
- {
- this->success.clear();
- uint32_t _size1133;
- ::apache::thrift::protocol::TType _etype1136;
- xfer += iprot->readListBegin(_etype1136, _size1133);
- this->success.resize(_size1133);
- uint32_t _i1137;
- for (_i1137 = 0; _i1137 < _size1133; ++_i1137)
- {
- xfer += this->success[_i1137].read(iprot);
- }
- xfer += iprot->readListEnd();
- }
- this->__isset.success = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 1:
- if (ftype == ::apache::thrift::protocol::T_STRUCT) {
- xfer += this->o1.read(iprot);
- this->__isset.o1 = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- default:
- xfer += iprot->skip(ftype);
- break;
- }
- xfer += iprot->readFieldEnd();
- }
-
- xfer += iprot->readStructEnd();
-
- return xfer;
-}
-
-uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
-
- uint32_t xfer = 0;
-
- xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_result");
-
- if (this->__isset.success) {
- xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
- {
- xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<TableMeta> ::const_iterator _iter1138;
- for (_iter1138 = this->success.begin(); _iter1138 != this->success.end(); ++_iter1138)
- {
- xfer += (*_iter1138).write(oprot);
- }
- xfer += oprot->writeListEnd();
- }
- xfer += oprot->writeFieldEnd();
- } else if (this->__isset.o1) {
- xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
- xfer += this->o1.write(oprot);
- xfer += oprot->writeFieldEnd();
- }
- xfer += oprot->writeFieldStop();
- xfer += oprot->writeStructEnd();
- return xfer;
-}
-
-
-ThriftHiveMetastore_get_table_meta_presult::~ThriftHiveMetastore_get_table_meta_presult() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
- apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
- uint32_t xfer = 0;
- std::string fname;
- ::apache::thrift::protocol::TType ftype;
- int16_t fid;
-
- xfer += iprot->readStructBegin(fname);
-
- using ::apache::thrift::protocol::TProtocolException;
-
-
- while (true)
- {
- xfer += iprot->readFieldBegin(fname, ftype, fid);
- if (ftype == ::apache::thrift::protocol::T_STOP) {
- break;
- }
- switch (fid)
- {
- case 0:
- if (ftype == ::apache::thrift::protocol::T_LIST) {
- {
- (*(this->success)).clear();
uint32_t _size1139;
::apache::thrift::protocol::TType _etype1142;
xfer += iprot->readListBegin(_etype1142, _size1139);
- (*(this->success)).resize(_size1139);
+ this->success.resize(_size1139);
uint32_t _i1143;
for (_i1143 = 0; _i1143 < _size1139; ++_i1143)
{
- xfer += (*(this->success))[_i1143].read(iprot);
+ xfer += iprot->readString(this->success[_i1143]);
+ }
+ xfer += iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_by_type_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+ std::vector<std::string> ::const_iterator _iter1144;
+ for (_iter1144 = this->success.begin(); _iter1144 != this->success.end(); ++_iter1144)
+ {
+ xfer += oprot->writeString((*_iter1144));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_get_tables_by_type_presult::~ThriftHiveMetastore_get_tables_by_type_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size1145;
+ ::apache::thrift::protocol::TType _etype1148;
+ xfer += iprot->readListBegin(_etype1148, _size1145);
+ (*(this->success)).resize(_size1145);
+ uint32_t _i1149;
+ for (_i1149 = 0; _i1149 < _size1145; ++_i1149)
+ {
+ xfer += iprot->readString((*(this->success))[_i1149]);
+ }
+ xfer += iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_args::~ThriftHiveMetastore_get_table_meta_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->db_patterns);
+ this->__isset.db_patterns = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_patterns);
+ this->__isset.tbl_patterns = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->tbl_types.clear();
+ uint32_t _size1150;
+ ::apache::thrift::protocol::TType _etype1153;
+ xfer += iprot->readListBegin(_etype1153, _size1150);
+ this->tbl_types.resize(_size1150);
+ uint32_t _i1154;
+ for (_i1154 = 0; _i1154 < _size1150; ++_i1154)
+ {
+ xfer += iprot->readString(this->tbl_types[_i1154]);
+ }
+ xfer += iprot->readListEnd();
+ }
+ this->__isset.tbl_types = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_args");
+
+ xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->db_patterns);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_patterns);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
+ std::vector<std::string> ::const_iterator _iter1155;
+ for (_iter1155 = this->tbl_types.begin(); _iter1155 != this->tbl_types.end(); ++_iter1155)
+ {
+ xfer += oprot->writeString((*_iter1155));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_pargs::~ThriftHiveMetastore_get_table_meta_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_pargs");
+
+ xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->db_patterns)));
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_patterns)));
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
+ std::vector<std::string> ::const_iterator _iter1156;
+ for (_iter1156 = (*(this->tbl_types)).begin(); _iter1156 != (*(this->tbl_types)).end(); ++_iter1156)
+ {
+ xfer += oprot->writeString((*_iter1156));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_result::~ThriftHiveMetastore_get_table_meta_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size1157;
+ ::apache::thrift::protocol::TType _etype1160;
+ xfer += iprot->readListBegin(_etype1160, _size1157);
+ this->success.resize(_size1157);
+ uint32_t _i1161;
+ for (_i1161 = 0; _i1161 < _size1157; ++_i1161)
+ {
+ xfer += this->success[_i1161].read(iprot);
+ }
+ xfer += iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<TableMeta> ::const_iterator _iter1162;
+ for (_iter1162 = this->success.begin(); _iter1162 != this->success.end(); ++_iter1162)
+ {
+ xfer += (*_iter1162).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_get_table_meta_presult::~ThriftHiveMetastore_get_table_meta_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size1163;
+ ::apache::thrift::protocol::TType _etype1166;
+ xfer += iprot->readListBegin(_etype1166, _size1163);
+ (*(this->success)).resize(_size1163);
+ uint32_t _i1167;
+ for (_i1167 = 0; _i1167 < _size1163; ++_i1167)
+ {
+ xfer += (*(this->success))[_i1167].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1144;
- ::apache::thrift::protocol::TType _etype1147;
- xfer += iprot->readListBegin(_etype1147, _size1144);
- this->success.resize(_size1144);
- uint32_t _i1148;
- for (_i1148 = 0; _i1148 < _size1144; ++_i1148)
+ uint32_t _size1168;
+ ::apache::thrift::protocol::TType _etype1171;
+ xfer += iprot->readListBegin(_etype1171, _size1168);
+ this->success.resize(_size1168);
+ uint32_t _i1172;
+ for (_i1172 = 0; _i1172 < _size1168; ++_i1172)
{
- xfer += iprot->readString(this->success[_i1148]);
+ xfer += iprot->readString(this->success[_i1172]);
}
xfer += iprot->readListEnd();
}
@@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1149;
- for (_iter1149 = this->success.begin(); _iter1149 != this->success.end(); ++_iter1149)
+ std::vector<std::string> ::const_iterator _iter1173;
+ for (_iter1173 = this->success.begin(); _iter1173 != this->success.end(); ++_iter1173)
{
- xfer += oprot->writeString((*_iter1149));
+ xfer += oprot->writeString((*_iter1173));
}
xfer += oprot->writeListEnd();
}
@@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1150;
- ::apache::thrift::protocol::TType _etype1153;
- xfer += iprot->readListBegin(_etype1153, _size1150);
- (*(this->success)).resize(_size1150);
- uint32_t _i1154;
- for (_i1154 = 0; _i1154 < _size1150; ++_i1154)
+ uint32_t _size1174;
+ ::apache::thrift::protocol::TType _etype1177;
+ xfer += iprot->readListBegin(_etype1177, _size1174);
+ (*(this->success)).resize(_size1174);
+ uint32_t _i1178;
+ for (_i1178 = 0; _i1178 < _size1174; ++_i1178)
{
- xfer += iprot->readString((*(this->success))[_i1154]);
+ xfer += iprot->readString((*(this->success))[_i1178]);
}
xfer += iprot->readListEnd();
}
@@ -8024,14 +8024,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1155;
- ::apache::thrift::protocol::TType _etype1158;
- xfer += iprot->readListBegin(_etype1158, _size1155);
- this->tbl_names.resize(_size1155);
- uint32_t _i1159;
- for (_i1159 = 0; _i1159 < _size1155; ++_i1159)
+ uint32_t _size1179;
+ ::apache::thrift::protocol::TType _etype1182;
+ xfer += iprot->readListBegin(_etype1182, _size1179);
+ this->tbl_names.resize(_size1179);
+ uint32_t _i1183;
+ for (_i1183 = 0; _i1183 < _size1179; ++_i1183)
{
- xfer += iprot->readString(this->tbl_names[_i1159]);
+ xfer += iprot->readString(this->tbl_names[_i1183]);
}
xfer += iprot->readListEnd();
}
@@ -8064,10 +8064,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1160;
- for (_iter1160 = this->tbl_names.begin(); _iter1160 != this->tbl_names.end(); ++_iter1160)
+ std::vector<std::string> ::const_iterator _iter1184;
+ for (_iter1184 = this->tbl_names.begin(); _iter1184 != this->tbl_names.end(); ++_iter1184)
{
- xfer += oprot->writeString((*_iter1160));
+ xfer += oprot->writeString((*_iter1184));
}
xfer += oprot->writeListEnd();
}
@@ -8095,10 +8095,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1161;
- for (_iter1161 = (*(this->tbl_names)).begin(); _iter1161 != (*(this->tbl_names)).end(); ++_iter1161)
+ std::vector<std::string> ::const_iterator _iter1185;
+ for (_iter1185 = (*(this->tbl_names)).begin(); _iter1185 != (*(this->tbl_names)).end(); ++_iter1185)
{
- xfer += oprot->writeString((*_iter1161));
+ xfer += oprot->writeString((*_iter1185));
}
xfer += oprot->writeListEnd();
}
@@ -8139,14 +8139,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1162;
- ::apache::thrift::protocol::TType _etype1165;
- xfer += iprot->readListBegin(_etype1165, _size1162);
- this->success.resize(_size1162);
- uint32_t _i1166;
- for (_i1166 = 0; _i1166 < _size1162; ++_i1166)
+ uint32_t _size1186;
+ ::apache::thrift::protocol::TType _etype1189;
+ xfer += iprot->readListBegin(_etype1189, _size1186);
+ this->success.resize(_size1186);
+ uint32_t _i1190;
+ for (_i1190 = 0; _i1190 < _size1186; ++_i1190)
{
- xfer += this->success[_i1166].read(iprot);
+ xfer += this->success[_i1190].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8177,10 +8177,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Table> ::const_iterator _iter1167;
- for (_iter1167 = this->success.begin(); _iter1167 != this->success.end(); ++_iter1167)
+ std::vector<Table> ::const_iterator _iter1191;
+ for (_iter1191 = this->success.begin(); _iter1191 != this->success.end(); ++_iter1191)
{
- xfer += (*_iter1167).write(oprot);
+ xfer += (*_iter1191).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8221,14 +8221,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1168;
- ::apache::thrift::protocol::TType _etype1171;
- xfer += iprot->readListBegin(_etype1171, _size1168);
- (*(this->success)).resize(_size1168);
- uint32_t _i1172;
- for (_i1172 = 0; _i1172 < _size1168; ++_i1172)
+ uint32_t _size1192;
+ ::apache::thrift::protocol::TType _etype1195;
+ xfer += iprot->readListBegin(_etype1195, _size1192);
+ (*(this->success)).resize(_size1192);
+ uint32_t _i1196;
+ for (_i1196 = 0; _i1196 < _size1192; ++_i1196)
{
- xfer += (*(this->success))[_i1172].read(iprot);
+ xfer += (*(this->success))[_i1196].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8864,14 +8864,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1173;
- ::apache::thrift::protocol::TType _etype1176;
- xfer += iprot->readListBegin(_etype1176, _size1173);
- this->success.resize(_size1173);
- uint32_t _i1177;
- for (_i1177 = 0; _i1177 < _size1173; ++_i1177)
+ uint32_t _size1197;
+ ::apache::thrift::protocol::TType _etype1200;
+ xfer += iprot->readListBegin(_etype1200, _size1197);
+ this->success.resize(_size1197);
+ uint32_t _i1201;
+ for (_i1201 = 0; _i1201 < _size1197; ++_i1201)
{
- xfer += iprot->readString(this->success[_i1177]);
+ xfer += iprot->readString(this->success[_i1201]);
}
xfer += iprot->readListEnd();
}
@@ -8926,10 +8926,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1178;
- for (_iter1178 = this->success.begin(); _iter1178 != this->success.end(); ++_iter1178)
+ std::vector<std::string> ::const_iterator _iter1202;
+ for (_iter1202 = this->success.begin(); _iter1202 != this->success.end(); ++_iter1202)
{
- xfer += oprot->writeString((*_iter1178));
+ xfer += oprot->writeString((*_iter1202));
}
xfer += oprot->writeListEnd();
}
@@ -8982,14 +8982,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1179;
- ::apache::thrift::protocol::TType _etype1182;
- xfer += iprot->readListBegin(_etype1182, _size1179);
- (*(this->success)).resize(_size1179);
- uint32_t _i1183;
- for (_i1183 = 0; _i1183 < _size1179; ++_i1183)
+ uint32_t _size1203;
+ ::apache::thrift::protocol::TType _etype1206;
+ xfer += iprot->readListBegin(_etype1206, _size1203);
+ (*(this->success)).resize(_size1203);
+ uint32_t _i1207;
+ for (_i1207 = 0; _i1207 < _size1203; ++_i1207)
{
- xfer += iprot->readString((*(this->success))[_i1183]);
+ xfer += iprot->readString((*(this->success))[_i1207]);
}
xfer += iprot->readListEnd();
}
@@ -10323,14 +10323,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1184;
- ::apache::thrift::protocol::TType _etype1187;
- xfer += iprot->readListBegin(_etype1187, _size1184);
- this->new_parts.resize(_size1184);
- uint32_t _i1188;
- for (_i1188 = 0; _i1188 < _size1184; ++_i1188)
+ uint32_t _size1208;
+ ::apache::thrift::protocol::TType _etype1211;
+ xfer += iprot->readListBegin(_etype1211, _size1208);
+ this->new_parts.resize(_size1208);
+ uint32_t _i1212;
+ for (_i1212 = 0; _i1212 < _size1208; ++_i1212)
{
- xfer += this->new_parts[_i1188].read(iprot);
+ xfer += this->new_parts[_i1212].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10359,10 +10359,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<Partition> ::const_iterator _iter1189;
- for (_iter1189 = this->new_parts.begin(); _iter1189 != this->new_parts.end(); ++_iter1189)
+ std::vector<Partition> ::const_iterator _iter1213;
+ for (_iter1213 = this->new_parts.begin(); _iter1213 != this->new_parts.end(); ++_iter1213)
{
- xfer += (*_iter1189).write(oprot);
+ xfer += (*_iter1213).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10386,10 +10386,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<Partition> ::const_iterator _iter1190;
- for (_iter1190 = (*(this->new_parts)).begin(); _iter1190 != (*(this->new_parts)).end(); ++_iter1190)
+ std::vector<Partition> ::const_iterator _iter1214;
+ for (_iter1214 = (*(this->new_parts)).begin(); _iter1214 != (*(this->new_parts)).end(); ++_iter1214)
{
- xfer += (*_iter1190).write(oprot);
+ xfer += (*_iter1214).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10598,14 +10598,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1191;
- ::apache::thrift::protocol::TType _etype1194;
- xfer += iprot->readListBegin(_etype1194, _size1191);
- this->new_parts.resize(_size1191);
- uint32_t _i1195;
- for (_i1195 = 0; _i1195 < _size1191; ++_i1195)
+ uint32_t _size1215;
+ ::apache::thrift::protocol::TType _etype1218;
+ xfer += iprot->readListBegin(_etype1218, _size1215);
+ this->new_parts.resize(_size1215);
+ uint32_t _i1219;
+ for (_i1219 = 0; _i1219 < _size1215; ++_i1219)
{
- xfer += this->new_parts[_i1195].read(iprot);
+ xfer += this->new_parts[_i1219].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10634,10 +10634,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<PartitionSpec> ::const_iterator _iter1196;
- for (_iter1196 = this->new_parts.begin(); _iter1196 != this->new_parts.end(); ++_iter1196)
+ std::vector<PartitionSpec> ::const_iterator _iter1220;
+ for (_iter1220 = this->new_parts.begin(); _iter1220 != this->new_parts.end(); ++_iter1220)
{
- xfer += (*_iter1196).write(oprot);
+ xfer += (*_iter1220).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10661,10 +10661,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<PartitionSpec> ::const_iterator _iter1197;
- for (_iter1197 = (*(this->new_parts)).begin(); _iter1197 != (*(this->new_parts)).end(); ++_iter1197)
+ std::vector<PartitionSpec> ::const_iterator _iter1221;
+ for (_iter1221 = (*(this->new_parts)).begin(); _iter1221 != (*(this->new_parts)).end(); ++_iter1221)
{
- xfer += (*_iter1197).write(oprot);
+ xfer += (*_iter1221).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10889,14 +10889,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1198;
- ::apache::thrift::protocol::TType _etype1201;
- xfer += iprot->readListBegin(_etype1201, _size1198);
- this->part_vals.resize(_size1198);
- uint32_t _i1202;
- for (_i1202 = 0; _i1202 < _size1198; ++_i1202)
+ uint32_t _size1222;
+ ::apache::thrift::protocol::TType _etype1225;
+ xfer += iprot->readListBegin(_etype1225, _size1222);
+ this->part_vals.resize(_size1222);
+ uint32_t _i1226;
+ for (_i1226 = 0; _i1226 < _size1222; ++_i1226)
{
- xfer += iprot->readString(this->part_vals[_i1202]);
+ xfer += iprot->readString(this->part_vals[_i1226]);
}
xfer += iprot->readListEnd();
}
@@ -10933,10 +10933,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1203;
- for (_iter1203 = this->part_vals.begin(); _iter1203 != this->part_vals.end(); ++_iter1203)
+ std::vector<std::string> ::const_iterator _iter1227;
+ for (_iter1227 = this->part_vals.begin(); _iter1227 != this->part_vals.end(); ++_iter1227)
{
- xfer += oprot->writeString((*_iter1203));
+ xfer += oprot->writeString((*_iter1227));
}
xfer += oprot->writeListEnd();
}
@@ -10968,10 +10968,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1204;
- for (_iter1204 = (*(this->part_vals)).begin(); _iter1204 != (*(this->part_vals)).end(); ++_iter1204)
+ std::vector<std::string> ::const_iterator _iter1228;
+ for (_iter1228 = (*(this->part_vals)).begin(); _iter1228 != (*(this->part_vals)).end(); ++_iter1228)
{
- xfer += oprot->writeString((*_iter1204));
+ xfer += oprot->writeString((*_iter1228));
}
xfer += oprot->writeListEnd();
}
@@ -11443,14 +11443,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1205;
- ::apache::thrift::protocol::TType _etype1208;
- xfer += iprot->readListBegin(_etype1208, _size1205);
- this->part_vals.resize(_size1205);
- uint32_t _i1209;
- for (_i1209 = 0; _i1209 < _size1205; ++_i1209)
+ uint32_t _size1229;
+ ::apache::thrift::protocol::TType _etype1232;
+ xfer += iprot->readListBegin(_etype1232, _size1229);
+ this->part_vals.resize(_size1229);
+ uint32_t _i1233;
+ for (_i1233 = 0; _i1233 < _size1229; ++_i1233)
{
- xfer += iprot->readString(this->part_vals[_i1209]);
+ xfer += iprot->readString(this->part_vals[_i1233]);
}
xfer += iprot->readListEnd();
}
@@ -11495,10 +11495,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1210;
- for (_iter1210 = this->part_vals.begin(); _iter1210 != this->part_vals.end(); ++_iter1210)
+ std::vector<std::string> ::const_iterator _iter1234;
+ for (_iter1234 = this->part_vals.begin(); _iter1234 != this->part_vals.end(); ++_iter1234)
{
- xfer += oprot->writeString((*_iter1210));
+ xfer += oprot->writeString((*_iter1234));
}
xfer += oprot->writeListEnd();
}
@@ -11534,10 +11534,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1211;
- for (_iter1211 = (*(this->part_vals)).begin(); _iter1211 != (*(this->part_vals)).end(); ++_iter1211)
+ std::vector<std::string> ::const_iterator _iter1235;
+ for (_iter1235 = (*(this->part_vals)).begin(); _iter1235 != (*(this->part_vals)).end(); ++_iter1235)
{
- xfer += oprot->writeString((*_iter1211));
+ xfer += oprot->writeString((*_iter1235));
}
xfer += oprot->writeListEnd();
}
@@ -12340,14 +12340,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1212;
- ::apache::thrift::protocol::TType _etype1215;
- xfer += iprot->readListBegin(_etype1215, _size1212);
- this->part_vals.resize(_size1212);
- uint32_t _i1216;
- for (_i1216 = 0; _i1216 < _size1212; ++_i1216)
+ uint32_t _size1236;
+ ::apache::thrift::protocol::TType _etype1239;
+ xfer += iprot->readListBegin(_etype1239, _size1236);
+ this->part_vals.resize(_size1236);
+ uint32_t _i1240;
+ for (_i1240 = 0; _i1240 < _size1236; ++_i1240)
{
- xfer += iprot->readString(this->part_vals[_i1216]);
+ xfer += iprot->readString(this->part_vals[_i1240]);
}
xfer += iprot->readListEnd();
}
@@ -12392,10 +12392,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1217;
- for (_iter1217 = this->part_vals.begin(); _iter1217 != this->part_vals.end(); ++_iter1217)
+ std::vector<std::string> ::const_iterator _iter1241;
+ for (_iter1241 = this->part_vals.begin(); _iter1241 != this->part_vals.end(); ++_iter1241)
{
- xfer += oprot->writeString((*_iter1217));
+ xfer += oprot->writeString((*_iter1241));
}
xfer += oprot->writeListEnd();
}
@@ -12431,10 +12431,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1218;
- for (_iter1218 = (*(this->part_vals)).begin(); _iter1218 != (*(this->part_vals)).end(); ++_iter1218)
+ std::vector<std::string> ::const_iterator _iter1242;
+ for (_iter1242 = (*(this->part_vals)).begin(); _iter1242 != (*(this->part_vals)).end(); ++_iter1242)
{
- xfer += oprot->writeString((*_iter1218));
+ xfer += oprot->writeString((*_iter1242));
}
xfer += oprot->writeListEnd();
}
@@ -12643,14 +12643,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1219;
- ::apache::thrift::protocol::TType _etype1222;
- xfer += iprot->readListBegin(_etype1222, _size1219);
- this->part_vals.resize(_size1219);
- uint32_t _i1223;
- for (_i1223 = 0; _i1223 < _size1219; ++_i1223)
+ uint32_t _size1243;
+ ::apache::thrift::protocol::TType _etype1246;
+ xfer += iprot->readListBegin(_etype1246, _size1243);
+ this->part_vals.resize(_size1243);
+ uint32_t _i1247;
+ for (_i1247 = 0; _i1247 < _size1243; ++_i1247)
{
- xfer += iprot->readString(this->part_vals[_i1223]);
+ xfer += iprot->readString(this->part_vals[_i1247]);
}
xfer += iprot->readListEnd();
}
@@ -12703,10 +12703,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1224;
- for (_iter1224 = this->part_vals.begin(); _iter1224 != this->part_vals.end(); ++_iter1224)
+ std::vector<std::string> ::const_iterator _iter1248;
+ for (_iter1248 = this->part_vals.begin(); _iter1248 != this->part_vals.end(); ++_iter1248)
{
- xfer += oprot->writeString((*_iter1224));
+ xfer += oprot->writeString((*_iter1248));
}
xfer += oprot->writeListEnd();
}
@@ -12746,10 +12746,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1225;
- for (_iter1225 = (*(this->part_vals)).begin(); _iter1225 != (*(this->part_vals)).end(); ++_iter1225)
+ std::vector<std::string> ::const_iterator _iter1249;
+ for (_iter1249 = (*(this->part_vals)).begin(); _iter1249 != (*(this->part_vals)).end(); ++_iter1249)
{
- xfer += oprot->writeString((*_iter1225));
+ xfer += oprot->writeString((*_iter1249));
}
xfer += oprot->writeListEnd();
}
@@ -13755,14 +13755,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1226;
- ::apache::thrift::protocol::TType _etype1229;
- xfer += iprot->readListBegin(_et
<TRUNCATED>
[10/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management APIs (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 5c4284b..dc9540d 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -195,6 +195,12 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
virtual void alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) = 0;
virtual void drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) = 0;
virtual void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) = 0;
+ virtual void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request) = 0;
+ virtual void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request) = 0;
+ virtual void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) = 0;
+ virtual void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) = 0;
+ virtual void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) = 0;
+ virtual void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) = 0;
};
class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory {
@@ -771,6 +777,24 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& /* _return */, const WMGetTriggersForResourePlanRequest& /* request */) {
return;
}
+ void create_wm_pool(WMCreatePoolResponse& /* _return */, const WMCreatePoolRequest& /* request */) {
+ return;
+ }
+ void alter_wm_pool(WMAlterPoolResponse& /* _return */, const WMAlterPoolRequest& /* request */) {
+ return;
+ }
+ void drop_wm_pool(WMDropPoolResponse& /* _return */, const WMDropPoolRequest& /* request */) {
+ return;
+ }
+ void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& /* _return */, const WMCreateOrUpdateMappingRequest& /* request */) {
+ return;
+ }
+ void drop_wm_mapping(WMDropMappingResponse& /* _return */, const WMDropMappingRequest& /* request */) {
+ return;
+ }
+ void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& /* _return */, const WMCreateOrDropTriggerToPoolMappingRequest& /* request */) {
+ return;
+ }
};
typedef struct _ThriftHiveMetastore_getMetaConf_args__isset {
@@ -22081,242 +22105,1042 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_presult {
};
-class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient {
+typedef struct _ThriftHiveMetastore_create_wm_pool_args__isset {
+ _ThriftHiveMetastore_create_wm_pool_args__isset() : request(false) {}
+ bool request :1;
+} _ThriftHiveMetastore_create_wm_pool_args__isset;
+
+class ThriftHiveMetastore_create_wm_pool_args {
public:
- ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
- ::facebook::fb303::FacebookServiceClient(prot, prot) {}
- ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::facebook::fb303::FacebookServiceClient(iprot, oprot) {}
- boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
- return piprot_;
+
+ ThriftHiveMetastore_create_wm_pool_args(const ThriftHiveMetastore_create_wm_pool_args&);
+ ThriftHiveMetastore_create_wm_pool_args& operator=(const ThriftHiveMetastore_create_wm_pool_args&);
+ ThriftHiveMetastore_create_wm_pool_args() {
}
- boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
- return poprot_;
+
+ virtual ~ThriftHiveMetastore_create_wm_pool_args() throw();
+ WMCreatePoolRequest request;
+
+ _ThriftHiveMetastore_create_wm_pool_args__isset __isset;
+
+ void __set_request(const WMCreatePoolRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_create_wm_pool_args & rhs) const
+ {
+ if (!(request == rhs.request))
+ return false;
+ return true;
}
- void getMetaConf(std::string& _return, const std::string& key);
- void send_getMetaConf(const std::string& key);
- void recv_getMetaConf(std::string& _return);
- void setMetaConf(const std::string& key, const std::string& value);
- void send_setMetaConf(const std::string& key, const std::string& value);
- void recv_setMetaConf();
- void create_database(const Database& database);
- void send_create_database(const Database& database);
- void recv_create_database();
- void get_database(Database& _return, const std::string& name);
- void send_get_database(const std::string& name);
- void recv_get_database(Database& _return);
- void drop_database(const std::string& name, const bool deleteData, const bool cascade);
- void send_drop_database(const std::string& name, const bool deleteData, const bool cascade);
- void recv_drop_database();
- void get_databases(std::vector<std::string> & _return, const std::string& pattern);
- void send_get_databases(const std::string& pattern);
- void recv_get_databases(std::vector<std::string> & _return);
- void get_all_databases(std::vector<std::string> & _return);
- void send_get_all_databases();
- void recv_get_all_databases(std::vector<std::string> & _return);
- void alter_database(const std::string& dbname, const Database& db);
- void send_alter_database(const std::string& dbname, const Database& db);
- void recv_alter_database();
- void get_type(Type& _return, const std::string& name);
- void send_get_type(const std::string& name);
- void recv_get_type(Type& _return);
- bool create_type(const Type& type);
- void send_create_type(const Type& type);
- bool recv_create_type();
- bool drop_type(const std::string& type);
- void send_drop_type(const std::string& type);
- bool recv_drop_type();
- void get_type_all(std::map<std::string, Type> & _return, const std::string& name);
- void send_get_type_all(const std::string& name);
- void recv_get_type_all(std::map<std::string, Type> & _return);
- void get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
- void send_get_fields(const std::string& db_name, const std::string& table_name);
- void recv_get_fields(std::vector<FieldSchema> & _return);
- void get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
- void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
- void recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return);
- void get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
- void send_get_schema(const std::string& db_name, const std::string& table_name);
- void recv_get_schema(std::vector<FieldSchema> & _return);
- void get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
- void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
- void recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return);
- void create_table(const Table& tbl);
- void send_create_table(const Table& tbl);
- void recv_create_table();
- void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
- void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
- void recv_create_table_with_environment_context();
- void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
- void send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
- void recv_create_table_with_constraints();
- void drop_constraint(const DropConstraintRequest& req);
- void send_drop_constraint(const DropConstraintRequest& req);
- void recv_drop_constraint();
- void add_primary_key(const AddPrimaryKeyRequest& req);
- void send_add_primary_key(const AddPrimaryKeyRequest& req);
- void recv_add_primary_key();
- void add_foreign_key(const AddForeignKeyRequest& req);
- void send_add_foreign_key(const AddForeignKeyRequest& req);
- void recv_add_foreign_key();
- void add_unique_constraint(const AddUniqueConstraintRequest& req);
- void send_add_unique_constraint(const AddUniqueConstraintRequest& req);
- void recv_add_unique_constraint();
- void add_not_null_constraint(const AddNotNullConstraintRequest& req);
- void send_add_not_null_constraint(const AddNotNullConstraintRequest& req);
- void recv_add_not_null_constraint();
- void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
- void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
- void recv_drop_table();
- void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
- void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
- void recv_drop_table_with_environment_context();
- void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
- void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
- void recv_truncate_table();
- void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
- void send_get_tables(const std::string& db_name, const std::string& pattern);
- void recv_get_tables(std::vector<std::string> & _return);
- void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType);
- void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType);
- void recv_get_tables_by_type(std::vector<std::string> & _return);
- void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
- void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
- void recv_get_table_meta(std::vector<TableMeta> & _return);
- void get_all_tables(std::vector<std::string> & _return, const std::string& db_name);
- void send_get_all_tables(const std::string& db_name);
- void recv_get_all_tables(std::vector<std::string> & _return);
- void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name);
- void send_get_table(const std::string& dbname, const std::string& tbl_name);
- void recv_get_table(Table& _return);
- void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
- void send_get_table_objects_by_name(const std::string& dbname, const std::vector<std::string> & tbl_names);
- void recv_get_table_objects_by_name(std::vector<Table> & _return);
- void get_table_req(GetTableResult& _return, const GetTableRequest& req);
- void send_get_table_req(const GetTableRequest& req);
- void recv_get_table_req(GetTableResult& _return);
- void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req);
- void send_get_table_objects_by_name_req(const GetTablesRequest& req);
- void recv_get_table_objects_by_name_req(GetTablesResult& _return);
- void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
- void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
- void recv_get_table_names_by_filter(std::vector<std::string> & _return);
- void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
- void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
- void recv_alter_table();
- void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
- void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
- void recv_alter_table_with_environment_context();
- void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
- void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
- void recv_alter_table_with_cascade();
- void add_partition(Partition& _return, const Partition& new_part);
- void send_add_partition(const Partition& new_part);
- void recv_add_partition(Partition& _return);
- void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context);
- void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context);
- void recv_add_partition_with_environment_context(Partition& _return);
- int32_t add_partitions(const std::vector<Partition> & new_parts);
- void send_add_partitions(const std::vector<Partition> & new_parts);
- int32_t recv_add_partitions();
- int32_t add_partitions_pspec(const std::vector<PartitionSpec> & new_parts);
- void send_add_partitions_pspec(const std::vector<PartitionSpec> & new_parts);
- int32_t recv_add_partitions_pspec();
- void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
- void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
- void recv_append_partition(Partition& _return);
- void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request);
- void send_add_partitions_req(const AddPartitionsRequest& request);
- void recv_add_partitions_req(AddPartitionsResult& _return);
- void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context);
- void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context);
- void recv_append_partition_with_environment_context(Partition& _return);
- void append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
- void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
- void recv_append_partition_by_name(Partition& _return);
- void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context);
- void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context);
- void recv_append_partition_by_name_with_environment_context(Partition& _return);
- bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData);
- void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData);
- bool recv_drop_partition();
- bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context);
- void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context);
- bool recv_drop_partition_with_environment_context();
- bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData);
- void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData);
- bool recv_drop_partition_by_name();
- bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context);
- void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context);
- bool recv_drop_partition_by_name_with_environment_context();
- void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req);
- void send_drop_partitions_req(const DropPartitionsRequest& req);
- void recv_drop_partitions_req(DropPartitionsResult& _return);
- void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
- void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
- void recv_get_partition(Partition& _return);
- void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
- void send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
- void recv_exchange_partition(Partition& _return);
- void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
- void send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
- void recv_exchange_partitions(std::vector<Partition> & _return);
- void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
- void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
- void recv_get_partition_with_auth(Partition& _return);
- void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
- void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
- void recv_get_partition_by_name(Partition& _return);
- void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
- void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
- void recv_get_partitions(std::vector<Partition> & _return);
- void get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
- void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
- void recv_get_partitions_with_auth(std::vector<Partition> & _return);
- void get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts);
- void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts);
- void recv_get_partitions_pspec(std::vector<PartitionSpec> & _return);
- void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
- void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
- void recv_get_partition_names(std::vector<std::string> & _return);
- void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request);
- void send_get_partition_values(const PartitionValuesRequest& request);
- void recv_get_partition_values(PartitionValuesResponse& _return);
- void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
- void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
- void recv_get_partitions_ps(std::vector<Partition> & _return);
- void get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
- void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
- void recv_get_partitions_ps_with_auth(std::vector<Partition> & _return);
- void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
- void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
- void recv_get_partition_names_ps(std::vector<std::string> & _return);
- void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
- void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
- void recv_get_partitions_by_filter(std::vector<Partition> & _return);
- void get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts);
- void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts);
- void recv_get_part_specs_by_filter(std::vector<PartitionSpec> & _return);
- void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req);
- void send_get_partitions_by_expr(const PartitionsByExprRequest& req);
- void recv_get_partitions_by_expr(PartitionsByExprResult& _return);
- int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter);
- void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter);
- int32_t recv_get_num_partitions_by_filter();
- void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
- void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
- void recv_get_partitions_by_names(std::vector<Partition> & _return);
- void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
- void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
- void recv_alter_partition();
- void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
- void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
- void recv_alter_partitions();
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- void recv_alter_partitions_with_environment_context();
- void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
- void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
- void recv_alter_partition_with_environment_context();
+ bool operator != (const ThriftHiveMetastore_create_wm_pool_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_create_wm_pool_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_wm_pool_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_create_wm_pool_pargs() throw();
+ const WMCreatePoolRequest* request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_wm_pool_result__isset {
+ _ThriftHiveMetastore_create_wm_pool_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_create_wm_pool_result__isset;
+
+class ThriftHiveMetastore_create_wm_pool_result {
+ public:
+
+ ThriftHiveMetastore_create_wm_pool_result(const ThriftHiveMetastore_create_wm_pool_result&);
+ ThriftHiveMetastore_create_wm_pool_result& operator=(const ThriftHiveMetastore_create_wm_pool_result&);
+ ThriftHiveMetastore_create_wm_pool_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_create_wm_pool_result() throw();
+ WMCreatePoolResponse success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_create_wm_pool_result__isset __isset;
+
+ void __set_success(const WMCreatePoolResponse& val);
+
+ void __set_o1(const AlreadyExistsException& val);
+
+ void __set_o2(const NoSuchObjectException& val);
+
+ void __set_o3(const InvalidObjectException& val);
+
+ void __set_o4(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_create_wm_pool_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ if (!(o4 == rhs.o4))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_create_wm_pool_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_create_wm_pool_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_wm_pool_presult__isset {
+ _ThriftHiveMetastore_create_wm_pool_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_create_wm_pool_presult__isset;
+
+class ThriftHiveMetastore_create_wm_pool_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_create_wm_pool_presult() throw();
+ WMCreatePoolResponse* success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_create_wm_pool_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_wm_pool_args__isset {
+ _ThriftHiveMetastore_alter_wm_pool_args__isset() : request(false) {}
+ bool request :1;
+} _ThriftHiveMetastore_alter_wm_pool_args__isset;
+
+class ThriftHiveMetastore_alter_wm_pool_args {
+ public:
+
+ ThriftHiveMetastore_alter_wm_pool_args(const ThriftHiveMetastore_alter_wm_pool_args&);
+ ThriftHiveMetastore_alter_wm_pool_args& operator=(const ThriftHiveMetastore_alter_wm_pool_args&);
+ ThriftHiveMetastore_alter_wm_pool_args() {
+ }
+
+ virtual ~ThriftHiveMetastore_alter_wm_pool_args() throw();
+ WMAlterPoolRequest request;
+
+ _ThriftHiveMetastore_alter_wm_pool_args__isset __isset;
+
+ void __set_request(const WMAlterPoolRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_alter_wm_pool_args & rhs) const
+ {
+ if (!(request == rhs.request))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_alter_wm_pool_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_alter_wm_pool_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_wm_pool_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_alter_wm_pool_pargs() throw();
+ const WMAlterPoolRequest* request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_wm_pool_result__isset {
+ _ThriftHiveMetastore_alter_wm_pool_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_alter_wm_pool_result__isset;
+
+class ThriftHiveMetastore_alter_wm_pool_result {
+ public:
+
+ ThriftHiveMetastore_alter_wm_pool_result(const ThriftHiveMetastore_alter_wm_pool_result&);
+ ThriftHiveMetastore_alter_wm_pool_result& operator=(const ThriftHiveMetastore_alter_wm_pool_result&);
+ ThriftHiveMetastore_alter_wm_pool_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_alter_wm_pool_result() throw();
+ WMAlterPoolResponse success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_alter_wm_pool_result__isset __isset;
+
+ void __set_success(const WMAlterPoolResponse& val);
+
+ void __set_o1(const AlreadyExistsException& val);
+
+ void __set_o2(const NoSuchObjectException& val);
+
+ void __set_o3(const InvalidObjectException& val);
+
+ void __set_o4(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_alter_wm_pool_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ if (!(o4 == rhs.o4))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_alter_wm_pool_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_alter_wm_pool_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_wm_pool_presult__isset {
+ _ThriftHiveMetastore_alter_wm_pool_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_alter_wm_pool_presult__isset;
+
+class ThriftHiveMetastore_alter_wm_pool_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_alter_wm_pool_presult() throw();
+ WMAlterPoolResponse* success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_alter_wm_pool_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_wm_pool_args__isset {
+ _ThriftHiveMetastore_drop_wm_pool_args__isset() : request(false) {}
+ bool request :1;
+} _ThriftHiveMetastore_drop_wm_pool_args__isset;
+
+class ThriftHiveMetastore_drop_wm_pool_args {
+ public:
+
+ ThriftHiveMetastore_drop_wm_pool_args(const ThriftHiveMetastore_drop_wm_pool_args&);
+ ThriftHiveMetastore_drop_wm_pool_args& operator=(const ThriftHiveMetastore_drop_wm_pool_args&);
+ ThriftHiveMetastore_drop_wm_pool_args() {
+ }
+
+ virtual ~ThriftHiveMetastore_drop_wm_pool_args() throw();
+ WMDropPoolRequest request;
+
+ _ThriftHiveMetastore_drop_wm_pool_args__isset __isset;
+
+ void __set_request(const WMDropPoolRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_drop_wm_pool_args & rhs) const
+ {
+ if (!(request == rhs.request))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_drop_wm_pool_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_drop_wm_pool_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_wm_pool_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_drop_wm_pool_pargs() throw();
+ const WMDropPoolRequest* request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_wm_pool_result__isset {
+ _ThriftHiveMetastore_drop_wm_pool_result__isset() : success(false), o1(false), o2(false), o3(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_drop_wm_pool_result__isset;
+
+class ThriftHiveMetastore_drop_wm_pool_result {
+ public:
+
+ ThriftHiveMetastore_drop_wm_pool_result(const ThriftHiveMetastore_drop_wm_pool_result&);
+ ThriftHiveMetastore_drop_wm_pool_result& operator=(const ThriftHiveMetastore_drop_wm_pool_result&);
+ ThriftHiveMetastore_drop_wm_pool_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_drop_wm_pool_result() throw();
+ WMDropPoolResponse success;
+ NoSuchObjectException o1;
+ InvalidOperationException o2;
+ MetaException o3;
+
+ _ThriftHiveMetastore_drop_wm_pool_result__isset __isset;
+
+ void __set_success(const WMDropPoolResponse& val);
+
+ void __set_o1(const NoSuchObjectException& val);
+
+ void __set_o2(const InvalidOperationException& val);
+
+ void __set_o3(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_drop_wm_pool_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_drop_wm_pool_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_drop_wm_pool_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_wm_pool_presult__isset {
+ _ThriftHiveMetastore_drop_wm_pool_presult__isset() : success(false), o1(false), o2(false), o3(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_drop_wm_pool_presult__isset;
+
+class ThriftHiveMetastore_drop_wm_pool_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_drop_wm_pool_presult() throw();
+ WMDropPoolResponse* success;
+ NoSuchObjectException o1;
+ InvalidOperationException o2;
+ MetaException o3;
+
+ _ThriftHiveMetastore_drop_wm_pool_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset {
+ _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset() : request(false) {}
+ bool request :1;
+} _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset;
+
+class ThriftHiveMetastore_create_or_update_wm_mapping_args {
+ public:
+
+ ThriftHiveMetastore_create_or_update_wm_mapping_args(const ThriftHiveMetastore_create_or_update_wm_mapping_args&);
+ ThriftHiveMetastore_create_or_update_wm_mapping_args& operator=(const ThriftHiveMetastore_create_or_update_wm_mapping_args&);
+ ThriftHiveMetastore_create_or_update_wm_mapping_args() {
+ }
+
+ virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_args() throw();
+ WMCreateOrUpdateMappingRequest request;
+
+ _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset __isset;
+
+ void __set_request(const WMCreateOrUpdateMappingRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_create_or_update_wm_mapping_args & rhs) const
+ {
+ if (!(request == rhs.request))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_create_or_update_wm_mapping_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_create_or_update_wm_mapping_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_or_update_wm_mapping_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_pargs() throw();
+ const WMCreateOrUpdateMappingRequest* request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset {
+ _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset;
+
+class ThriftHiveMetastore_create_or_update_wm_mapping_result {
+ public:
+
+ ThriftHiveMetastore_create_or_update_wm_mapping_result(const ThriftHiveMetastore_create_or_update_wm_mapping_result&);
+ ThriftHiveMetastore_create_or_update_wm_mapping_result& operator=(const ThriftHiveMetastore_create_or_update_wm_mapping_result&);
+ ThriftHiveMetastore_create_or_update_wm_mapping_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_result() throw();
+ WMCreateOrUpdateMappingResponse success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset __isset;
+
+ void __set_success(const WMCreateOrUpdateMappingResponse& val);
+
+ void __set_o1(const AlreadyExistsException& val);
+
+ void __set_o2(const NoSuchObjectException& val);
+
+ void __set_o3(const InvalidObjectException& val);
+
+ void __set_o4(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_create_or_update_wm_mapping_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ if (!(o4 == rhs.o4))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_create_or_update_wm_mapping_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_create_or_update_wm_mapping_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset {
+ _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset;
+
+class ThriftHiveMetastore_create_or_update_wm_mapping_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_presult() throw();
+ WMCreateOrUpdateMappingResponse* success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_wm_mapping_args__isset {
+ _ThriftHiveMetastore_drop_wm_mapping_args__isset() : request(false) {}
+ bool request :1;
+} _ThriftHiveMetastore_drop_wm_mapping_args__isset;
+
+class ThriftHiveMetastore_drop_wm_mapping_args {
+ public:
+
+ ThriftHiveMetastore_drop_wm_mapping_args(const ThriftHiveMetastore_drop_wm_mapping_args&);
+ ThriftHiveMetastore_drop_wm_mapping_args& operator=(const ThriftHiveMetastore_drop_wm_mapping_args&);
+ ThriftHiveMetastore_drop_wm_mapping_args() {
+ }
+
+ virtual ~ThriftHiveMetastore_drop_wm_mapping_args() throw();
+ WMDropMappingRequest request;
+
+ _ThriftHiveMetastore_drop_wm_mapping_args__isset __isset;
+
+ void __set_request(const WMDropMappingRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_drop_wm_mapping_args & rhs) const
+ {
+ if (!(request == rhs.request))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_drop_wm_mapping_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_drop_wm_mapping_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_wm_mapping_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_drop_wm_mapping_pargs() throw();
+ const WMDropMappingRequest* request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_wm_mapping_result__isset {
+ _ThriftHiveMetastore_drop_wm_mapping_result__isset() : success(false), o1(false), o2(false), o3(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_drop_wm_mapping_result__isset;
+
+class ThriftHiveMetastore_drop_wm_mapping_result {
+ public:
+
+ ThriftHiveMetastore_drop_wm_mapping_result(const ThriftHiveMetastore_drop_wm_mapping_result&);
+ ThriftHiveMetastore_drop_wm_mapping_result& operator=(const ThriftHiveMetastore_drop_wm_mapping_result&);
+ ThriftHiveMetastore_drop_wm_mapping_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_drop_wm_mapping_result() throw();
+ WMDropMappingResponse success;
+ NoSuchObjectException o1;
+ InvalidOperationException o2;
+ MetaException o3;
+
+ _ThriftHiveMetastore_drop_wm_mapping_result__isset __isset;
+
+ void __set_success(const WMDropMappingResponse& val);
+
+ void __set_o1(const NoSuchObjectException& val);
+
+ void __set_o2(const InvalidOperationException& val);
+
+ void __set_o3(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_drop_wm_mapping_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_drop_wm_mapping_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_drop_wm_mapping_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_wm_mapping_presult__isset {
+ _ThriftHiveMetastore_drop_wm_mapping_presult__isset() : success(false), o1(false), o2(false), o3(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_drop_wm_mapping_presult__isset;
+
+class ThriftHiveMetastore_drop_wm_mapping_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_drop_wm_mapping_presult() throw();
+ WMDropMappingResponse* success;
+ NoSuchObjectException o1;
+ InvalidOperationException o2;
+ MetaException o3;
+
+ _ThriftHiveMetastore_drop_wm_mapping_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset {
+ _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset() : request(false) {}
+ bool request :1;
+} _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset;
+
+class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args {
+ public:
+
+ ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args&);
+ ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args& operator=(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args&);
+ ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args() {
+ }
+
+ virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args() throw();
+ WMCreateOrDropTriggerToPoolMappingRequest request;
+
+ _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset __isset;
+
+ void __set_request(const WMCreateOrDropTriggerToPoolMappingRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args & rhs) const
+ {
+ if (!(request == rhs.request))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs() throw();
+ const WMCreateOrDropTriggerToPoolMappingRequest* request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset {
+ _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset;
+
+class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result {
+ public:
+
+ ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result&);
+ ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result& operator=(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result&);
+ ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result() throw();
+ WMCreateOrDropTriggerToPoolMappingResponse success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset __isset;
+
+ void __set_success(const WMCreateOrDropTriggerToPoolMappingResponse& val);
+
+ void __set_o1(const AlreadyExistsException& val);
+
+ void __set_o2(const NoSuchObjectException& val);
+
+ void __set_o3(const InvalidObjectException& val);
+
+ void __set_o4(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ if (!(o4 == rhs.o4))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset {
+ _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+ bool success :1;
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+ bool o4 :1;
+} _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset;
+
+class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult() throw();
+ WMCreateOrDropTriggerToPoolMappingResponse* success;
+ AlreadyExistsException o1;
+ NoSuchObjectException o2;
+ InvalidObjectException o3;
+ MetaException o4;
+
+ _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient {
+ public:
+ ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
+ ::facebook::fb303::FacebookServiceClient(prot, prot) {}
+ ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::facebook::fb303::FacebookServiceClient(iprot, oprot) {}
+ boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
+ return piprot_;
+ }
+ boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
+ return poprot_;
+ }
+ void getMetaConf(std::string& _return, const std::string& key);
+ void send_getMetaConf(const std::string& key);
+ void recv_getMetaConf(std::string& _return);
+ void setMetaConf(const std::string& key, const std::string& value);
+ void send_setMetaConf(const std::string& key, const std::string& value);
+ void recv_setMetaConf();
+ void create_database(const Database& database);
+ void send_create_database(const Database& database);
+ void recv_create_database();
+ void get_database(Database& _return, const std::string& name);
+ void send_get_database(const std::string& name);
+ void recv_get_database(Database& _return);
+ void drop_database(const std::string& name, const bool deleteData, const bool cascade);
+ void send_drop_database(const std::string& name, const bool deleteData, const bool cascade);
+ void recv_drop_database();
+ void get_databases(std::vector<std::string> & _return, const std::string& pattern);
+ void send_get_databases(const std::string& pattern);
+ void recv_get_databases(std::vector<std::string> & _return);
+ void get_all_databases(std::vector<std::string> & _return);
+ void send_get_all_databases();
+ void recv_get_all_databases(std::vector<std::string> & _return);
+ void alter_database(const std::string& dbname, const Database& db);
+ void send_alter_database(const std::string& dbname, const Database& db);
+ void recv_alter_database();
+ void get_type(Type& _return, const std::string& name);
+ void send_get_type(const std::string& name);
+ void recv_get_type(Type& _return);
+ bool create_type(const Type& type);
+ void send_create_type(const Type& type);
+ bool recv_create_type();
+ bool drop_type(const std::string& type);
+ void send_drop_type(const std::string& type);
+ bool recv_drop_type();
+ void get_type_all(std::map<std::string, Type> & _return, const std::string& name);
+ void send_get_type_all(const std::string& name);
+ void recv_get_type_all(std::map<std::string, Type> & _return);
+ void get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
+ void send_get_fields(const std::string& db_name, const std::string& table_name);
+ void recv_get_fields(std::vector<FieldSchema> & _return);
+ void get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+ void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+ void recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return);
+ void get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
+ void send_get_schema(const std::string& db_name, const std::string& table_name);
+ void recv_get_schema(std::vector<FieldSchema> & _return);
+ void get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+ void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+ void recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return);
+ void create_table(const Table& tbl);
+ void send_create_table(const Table& tbl);
+ void recv_create_table();
+ void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
+ void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
+ void recv_create_table_with_environment_context();
+ void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
+ void send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
+ void recv_create_table_with_constraints();
+ void drop_constraint(const DropConstraintRequest& req);
+ void send_drop_constraint(const DropConstraintRequest& req);
+ void recv_drop_constraint();
+ void add_primary_key(const AddPrimaryKeyRequest& req);
+ void send_add_primary_key(const AddPrimaryKeyRequest& req);
+ void recv_add_primary_key();
+ void add_foreign_key(const AddForeignKeyRequest& req);
+ void send_add_foreign_key(const AddForeignKeyRequest& req);
+ void recv_add_foreign_key();
+ void add_unique_constraint(const AddUniqueConstraintRequest& req);
+ void send_add_unique_constraint(const AddUniqueConstraintRequest& req);
+ void recv_add_unique_constraint();
+ void add_not_null_constraint(const AddNotNullConstraintRequest& req);
+ void send_add_not_null_constraint(const AddNotNullConstraintRequest& req);
+ void recv_add_not_null_constraint();
+ void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
+ void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
+ void recv_drop_table();
+ void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
+ void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
+ void recv_drop_table_with_environment_context();
+ void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
+ void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
+ void recv_truncate_table();
+ void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
+ void send_get_tables(const std::string& db_name, const std::string& pattern);
+ void recv_get_tables(std::vector<std::string> & _return);
+ void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType);
+ void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType);
+ void recv_get_tables_by_type(std::vector<std::string> & _return);
+ void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+ void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+ void recv_get_table_meta(std::vector<TableMeta> & _return);
+ void get_all_tables(std::vector<std::string> & _return, const std::string& db_name);
+ void send_get_all_tables(const std::string& db_name);
+ void recv_get_all_tables(std::vector<std::string> & _return);
+ void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name);
+ void send_get_table(const std::string& dbname, const std::string& tbl_name);
+ void recv_get_table(Table& _return);
+ void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
+ void send_get_table_objects_by_name(const std::string& dbname, const std::vector<std::string> & tbl_names);
+ void recv_get_table_objects_by_name(std::vector<Table> & _return);
+ void get_table_req(GetTableResult& _return, const GetTableRequest& req);
+ void send_get_table_req(const GetTableRequest& req);
+ void recv_get_table_req(GetTableResult& _return);
+ void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req);
+ void send_get_table_objects_by_name_req(const GetTablesRequest& req);
+ void recv_get_table_objects_by_name_req(GetTablesResult& _return);
+ void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
+ void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
+ void recv_get_table_names_by_filter(std::vector<std::string> & _return);
+ void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
+ void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
+ void recv_alter_table();
+ void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
+ void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
+ void recv_alter_table_with_environment_context();
+ void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
+ void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
+ void recv_alter_table_with_cascade();
+ void add_partition(Partition& _return, const Partition& new_part);
+ void send_add_partition(const Partition& new_part);
+ void recv_add_partition(Partition& _return);
+ void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context);
+ void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context);
+ void recv_add_partition_with_environment_context(Partition& _return);
+ int32_t add_partitions(const std::vector<Partition> & new_parts);
+ void send_add_partitions(const std::vector<Partition> & new_parts);
+ int32_t recv_add_partitions();
+ int32_t add_partitions_pspec(const std::vector<PartitionSpec> & new_parts);
+ void send_add_partitions_pspec(const std::vector<PartitionSpec> & new_parts);
+ int32_t recv_add_partitions_pspec();
+ void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
+ void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
+ void recv_append_partition(Partition& _return);
+ void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request);
+ void send_add_partitions_req(const AddPartitionsRequest& request);
+ void recv_add_partitions_req(AddPartitionsResult& _return);
+ void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context);
+ void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context);
+ void recv_append_partition_with_environment_context(Partition& _return);
+ void append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
+ void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
+ void recv_append_partition_by_name(Partition& _return);
+ void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context);
+ void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context);
+ void recv_append_partition_by_name_with_environment_context(Partition& _return);
+ bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData);
+ void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData);
+ bool recv_drop_partition();
+ bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context);
+ void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context);
+ bool recv_drop_partition_with_environment_context();
+ bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData);
+ void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData);
+ bool recv_drop_partition_by_name();
+ bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context);
+ void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context);
+ bool recv_drop_partition_by_name_with_environment_context();
+ void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req);
+ void send_drop_partitions_req(const DropPartitionsRequest& req);
+ void recv_drop_partitions_req(DropPartitionsResult& _return);
+ void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
+ void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
+ void recv_get_partition(Partition& _return);
+ void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+ void send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+ void recv_exchange_partition(Partition& _return);
+ void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+ void send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+ void recv_exchange_partitions(std::vector<Partition> & _return);
+ void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
+ void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
+ void recv_get_partition_with_auth(Partition& _return);
+ void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
+ void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
+ void recv_get_partition_by_name(Partition& _return);
+ void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
+ void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
+ void recv_get_partitions(std::vector<Partition> & _return);
+ void get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
+ void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
+ void recv_get_partitions_with_auth(std::vector<Partition> & _return);
+ void get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts);
+ void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts);
+ void recv_get_partitions_pspec(std::vector<PartitionSpec> & _return);
+ void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
+ void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
+ void recv_get_partition_names(std::vector<std::string> & _return);
+ void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request);
+ void send_get_partition_values(const PartitionValuesRequest& request);
+ void recv_get_partition_values(PartitionValuesResponse& _return);
+ void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
+ void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
+ void recv_get_partitions_ps(std::vector<Partition> & _return);
+ void get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
+ void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
+ void recv_get_partitions_ps_with_auth(std::vector<Partition> & _return);
+ void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
+ void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
+ void recv_get_partition_names_ps(std::vector<std::string> & _return);
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
+ void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
+ void recv_get_partitions_by_filter(std::vector<Partition> & _return);
+ void get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts);
+ void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts);
+ void recv_get_part_specs_by_filter(std::vector<PartitionSpec> & _return);
+ void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req);
+ void send_get_partitions_by_expr(const PartitionsByExprRequest& req);
+ void recv_get_partitions_by_expr(PartitionsByExprResult& _return);
+ int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter);
+ void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter);
+ int32_t recv_get_num_partitions_by_filter();
+ void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
+ void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
+ void recv_get_partitions_by_names(std::vector<Partition> & _return);
+ void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
+ void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
+ void recv_alter_partition();
+ void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
+ void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
+ void recv_alter_partitions();
+ void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
+ void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
+ void recv_alter_partitions_with_environment_context();
+ void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
+ void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
+ void recv_alter_partition_with_environment_context();
void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part);
void send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part);
void recv_rename_partition();
@@ -22611,6 +23435,24 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request);
void send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request);
void recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return);
+ void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request);
+ void send_create_wm_pool(const WMCreatePoolRequest& request);
+ void recv_create_wm_pool(WMCreatePoolResponse& _return);
+ void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request);
+ void send_alter_wm_pool(const WMAlterPoolRequest& request);
+ void recv_alter_wm_pool(WMAlterPoolResponse& _return);
+ void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request);
+ void send_drop_wm_pool(const WMDropPoolRequest& request);
+ void recv_drop_wm_pool(WMDropPoolResponse& _return);
+ void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request);
+ void send_create_or_update_wm_mapping(const WMCreateOrUpdateMappingRequest& request);
+ void recv_create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return);
+ void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request);
+ void send_drop_wm_mapping(const WMDropMappingRequest& request);
+ void recv_drop_wm_mapping(WMDropMappingResponse& _return);
+ void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request);
+ void send_create_or_drop_wm_trigger_to_pool_mapping(const WMCreateOrDropTriggerToPoolMappingRequest& request);
+ void recv_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return);
};
class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor {
@@ -22794,6 +23636,12 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
void process_alter_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_drop_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_triggers_for_resourceplan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_create_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_alter_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_drop_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_create_or_update_wm_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_drop_wm_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_create_or_drop_wm_trigger_to_pool_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
public:
ThriftHiveMetastoreProcessor(boost::shared_ptr<ThriftHiveMetastoreIf> iface) :
::facebook::fb303::FacebookServiceProcessor(iface),
@@ -22971,6 +23819,12 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
processMap_["alter_wm_trigger"] = &ThriftHiveMetastoreProcessor::process_alter_wm_trigger;
processMap_["drop_wm_trigger"] = &ThriftHiveMetastoreProcessor::process_drop_wm_trigger;
processMap_["get_triggers_for_resourceplan"] = &ThriftHiveMetastoreProcessor::process_get_triggers_for_resourceplan;
+ processMap_["create_wm_pool"] = &ThriftHiveMetastoreProcessor::process_create_wm_pool;
+ processMap_["alter_wm_pool"] = &ThriftHiveMetastoreProcessor::process_alter_wm_pool;
+ processMap_["drop_wm_pool"] = &ThriftHiveMetastoreProcessor::process_drop_wm_pool;
+ processMap_["create_or_update_wm_mapping"] = &ThriftHiveMetastoreProcessor::process_create_or_update_wm_mapping;
+ processMap_["drop_wm_mapping"] = &ThriftHiveMetastoreProcessor::process_drop_wm_mapping;
+ processMap_["create_or_drop_wm_trigger_to_pool_mapping"] = &ThriftHiveMetastoreProcessor::process_create_or_drop_wm_trigger_to_pool_mapping;
}
virtual ~ThriftHiveMetastoreProcessor() {}
@@ -24668,6 +25522,66 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
return;
}
+ void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->create_wm_pool(_return, request);
+ }
+ ifaces_[i]->create_wm_pool(_return, request);
+ return;
+ }
+
+ void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->alter_wm_pool(_return, request);
+ }
+ ifaces_[i]->alter_wm_pool(_return, request);
+ return;
+ }
+
+ void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->drop_wm_pool(_return, request);
+ }
+ ifaces_[i]->drop_wm_pool(_return, request);
+ return;
+ }
+
+ void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->create_or_update_wm_mapping(_return, request);
+ }
+ ifaces_[i]->create_or_update_wm_mapping(_return, request);
+ return;
+ }
+
+ void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->drop_wm_mapping(_return, request);
+ }
+ ifaces_[i]->drop_wm_mapping(_return, request);
+ return;
+ }
+
+ void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->create_or_drop_wm_trigger_to_pool_mapping(_return, request);
+ }
+ ifaces_[i]->create_or_drop_wm_trigger_to_pool_mapping(_return, request);
+ return;
+ }
+
};
// The 'concurrent' client is a thread safe client that correctly handles
@@ -25203,6 +26117,24 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request);
int32_t send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request);
void recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const int32_t seqid);
+ void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request);
+ int32_t send_create_wm_pool(const WMCreatePoolRequest& request);
+ void recv_create_wm_pool(WMCreatePoolResponse& _return, const int32_t seqid);
+ void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request);
+ int32_t send_alter_wm_pool(const WMAlterPoolRequest& request);
+ void recv_alter_wm_pool(WMAlterPoolResponse& _return, const int32_t seqid);
+ void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request);
+ int32_t send_drop_wm_pool(const WMDropPoolRequest& request);
+ void recv_drop_wm_pool(WMDropPoolResponse& _return, const int32_t seqid);
+ void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request);
+ int32_t send_create_or_update_wm_mapping(const WMCreateOrUpdateMappingRequest& request);
+ void recv_create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const int32_t seqid);
+ void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request);
+ int32_t send_drop_wm_mapping(const WMDropMappingRequest& request);
+ void recv_drop_wm_mapping(WMDropMappingResponse& _return, const int32_t seqid);
+ void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request);
+ int32_t send_create_or_drop_wm_trigger_to_pool_mapping(const WMCreateOrDropTriggerToPoolMappingRequest& request);
+ void recv_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const int32_t seqid);
};
#ifdef _WIN32
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 2aa3954..bf4bd7a 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -887,6 +887,36 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
printf("get_triggers_for_resourceplan\n");
}
+ void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request) {
+ // Your implementation goes here
+ printf("create_wm_pool\n");
+ }
+
+ void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request) {
+ // Your implementation goes here
+ printf("alter_wm_pool\n");
+ }
+
+ void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) {
+ // Your implementation goes here
+ printf("drop_wm_pool\n");
+ }
+
+ void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) {
+ // Your implementation goes here
+ printf("create_or_update_wm_mapping\n");
+ }
+
+ void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) {
+ // Your implementation goes here
+ printf("drop_wm_mapping\n");
+ }
+
+ void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) {
+ // Your implementation goes here
+ printf("create_or_drop_wm_trigger_to_pool_mapping\n");
+ }
+
};
int main(int argc, char **argv) {
[13/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
HIVE-17954 : Implement pool, user, group and trigger to pool management API's (Harish Jaiprakash, reviewed by Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/44ef5991
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/44ef5991
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/44ef5991
Branch: refs/heads/master
Commit: 44ef599155efa998b59b0723b2bb705bf60a1f21
Parents: be1f847
Author: sergey <se...@apache.org>
Authored: Mon Nov 27 14:27:21 2017 -0800
Committer: sergey <se...@apache.org>
Committed: Mon Nov 27 14:27:48 2017 -0800
----------------------------------------------------------------------
.../listener/DummyRawStoreFailEvent.java | 48 +-
.../hive/jdbc/TestTriggersWorkloadManager.java | 2 +-
.../upgrade/derby/046-HIVE-17566.derby.sql | 3 +-
.../upgrade/derby/hive-schema-3.0.0.derby.sql | 4 +-
.../upgrade/hive/hive-schema-3.0.0.hive.sql | 69 +
.../upgrade/mssql/031-HIVE-17566.mssql.sql | 4 +-
.../upgrade/mssql/hive-schema-3.0.0.mssql.sql | 3 -
.../upgrade/mysql/046-HIVE-17566.mysql.sql | 4 +-
.../upgrade/mysql/hive-schema-3.0.0.mysql.sql | 4 +-
.../upgrade/oracle/046-HIVE-17566.oracle.sql | 2 -
.../upgrade/oracle/hive-schema-3.0.0.oracle.sql | 3 -
.../postgres/045-HIVE-17566.postgres.sql | 3 -
.../postgres/hive-schema-3.0.0.postgres.sql | 1 -
.../hive/metastore/HiveMetaStoreClient.java | 55 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 21 +
.../DummyRawStoreControlledCommit.java | 48 +-
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 117 +-
.../hive/ql/exec/tez/UserPoolMapping.java | 4 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 53 +
.../hive/ql/parse/DDLSemanticAnalyzer.java | 246 +-
.../org/apache/hadoop/hive/ql/parse/HiveLexer.g | 4 +
.../apache/hadoop/hive/ql/parse/HiveParser.g | 153 +-
.../hadoop/hive/ql/parse/IdentifiersParser.g | 4 +-
.../hadoop/hive/ql/parse/ParseDriver.java | 3 +
.../hadoop/hive/ql/parse/ResourcePlanParser.g | 230 +
.../hive/ql/parse/SemanticAnalyzerFactory.java | 20 +-
.../hive/ql/plan/AlterResourcePlanDesc.java | 89 +-
.../hadoop/hive/ql/plan/AlterWMTriggerDesc.java | 54 +-
.../ql/plan/CreateOrAlterWMMappingDesc.java | 41 +
.../hive/ql/plan/CreateOrAlterWMPoolDesc.java | 50 +
.../CreateOrDropTriggerToPoolMappingDesc.java | 66 +
.../hive/ql/plan/CreateResourcePlanDesc.java | 24 +-
.../hive/ql/plan/CreateWMTriggerDesc.java | 54 +-
.../org/apache/hadoop/hive/ql/plan/DDLWork.java | 88 +-
.../hadoop/hive/ql/plan/DropWMMappingDesc.java | 29 +
.../hadoop/hive/ql/plan/DropWMPoolDesc.java | 33 +
.../hadoop/hive/ql/plan/HiveOperation.java | 9 +-
.../authorization/plugin/HiveOperationType.java | 6 +
.../plugin/sqlstd/Operation2Privilege.java | 12 +
.../hive/ql/exec/tez/TestWorkloadManager.java | 2 +-
.../test/queries/clientpositive/resourceplan.q | 154 +-
.../clientpositive/llap/resourceplan.q.out | 648 +-
.../results/clientpositive/llap/sysdb.q.out | 161 +-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 13702 ++++++++++-------
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 1398 +-
.../ThriftHiveMetastore_server.skeleton.cpp | 30 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 1636 +-
.../gen/thrift/gen-cpp/hive_metastore_types.h | 584 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 10718 +++++++++++--
.../hive/metastore/api/WMAlterPoolRequest.java | 504 +
.../hive/metastore/api/WMAlterPoolResponse.java | 283 +
...CreateOrDropTriggerToPoolMappingRequest.java | 708 +
...reateOrDropTriggerToPoolMappingResponse.java | 283 +
.../api/WMCreateOrUpdateMappingRequest.java | 501 +
.../api/WMCreateOrUpdateMappingResponse.java | 283 +
.../hive/metastore/api/WMCreatePoolRequest.java | 398 +
.../metastore/api/WMCreatePoolResponse.java | 283 +
.../metastore/api/WMDropMappingRequest.java | 398 +
.../metastore/api/WMDropMappingResponse.java | 283 +
.../hive/metastore/api/WMDropPoolRequest.java | 499 +
.../hive/metastore/api/WMDropPoolResponse.java | 283 +
.../hadoop/hive/metastore/api/WMMapping.java | 112 +-
.../gen-php/metastore/ThriftHiveMetastore.php | 2930 +++-
.../src/gen/thrift/gen-php/metastore/Types.php | 924 +-
.../hive_metastore/ThriftHiveMetastore-remote | 42 +
.../hive_metastore/ThriftHiveMetastore.py | 5440 ++++---
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 766 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 202 +-
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 446 +
.../hadoop/hive/metastore/HiveMetaStore.java | 84 +-
.../hadoop/hive/metastore/ObjectStore.java | 372 +-
.../apache/hadoop/hive/metastore/RawStore.java | 27 +-
.../hive/metastore/cache/CachedStore.java | 48 +-
.../hadoop/hive/metastore/model/MWMPool.java | 23 +-
.../src/main/resources/package.jdo | 10 +-
.../src/main/thrift/hive_metastore.thrift | 67 +-
.../DummyRawStoreForJdoConnection.java | 40 +
77 files changed, 36467 insertions(+), 10468 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 7196756..7965ca3 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -74,6 +74,8 @@ import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
@@ -982,7 +984,7 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
@Override
public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
- throws AlreadyExistsException, MetaException {
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
objectStore.createResourcePlan(resourcePlan, defaultPoolSize);
}
@@ -1043,4 +1045,48 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
throws NoSuchObjectException, MetaException {
return objectStore.getTriggersForResourcePlan(resourcePlanName);
}
+
+ @Override
+ public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ objectStore.createPool(pool);
+ }
+
+ @Override
+ public void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.alterPool(pool, poolPath);
+ }
+
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.dropWMPool(resourcePlanName, poolPath);
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ objectStore.createOrUpdateWMMapping(mapping, update);
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.dropWMMapping(mapping);
+ }
+
+ @Override
+ public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
+
+ @Override
+ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
index 0506f67..285e533 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
@@ -86,4 +86,4 @@ public class TestTriggersWorkloadManager extends TestTriggersTezSessionPoolManag
}
wm.updateResourcePlanAsync(rp).get(10, TimeUnit.SECONDS);
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql b/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
index 9f12153..8eb197c 100644
--- a/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
+++ b/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
@@ -2,11 +2,10 @@ CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) N
CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
-CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
+CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
index 16aae7a..f93d0d1 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
@@ -112,7 +112,7 @@ CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NU
CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
-CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
+CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
@@ -356,8 +356,6 @@ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
-
ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
index 68d8d37..7589101 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
@@ -989,6 +989,75 @@ ON
t.RP_ID = r.RP_ID"
);
+CREATE TABLE IF NOT EXISTS `WM_POOLS` (
+ `RP_NAME` string,
+ `PATH` string,
+ `ALLOC_FRACTION` double,
+ `QUERY_PARALLELISM` int,
+ `SCHEDULING_POLICY` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ WM_POOL.PATH,
+ WM_POOL.ALLOC_FRACTION,
+ WM_POOL.QUERY_PARALLELISM,
+ WM_POOL.SCHEDULING_POLICY
+FROM
+ WM_POOL
+JOIN
+ WM_RESOURCEPLAN
+ON
+ WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+);
+
+CREATE TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
+ `RP_NAME` string,
+ `POOL_PATH` string,
+ `TRIGGER_NAME` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME RP_NAME,
+ WM_POOL.PATH POOL_PATH,
+ WM_TRIGGER.NAME TRIGGER_NAME
+FROM
+ WM_POOL_TO_TRIGGER
+JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID
+JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID
+JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+);
+
+CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` (
+ `RP_NAME` string,
+ `ENTITY_TYPE` string,
+ `ENTITY_NAME` string,
+ `POOL_PATH` string,
+ `ORDERING` int
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ ENTITY_TYPE,
+ ENTITY_NAME,
+ WM_POOL.PATH,
+ ORDERING
+FROM
+ WM_MAPPING
+JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID
+LEFT OUTER JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID"
+);
+
+
DROP DATABASE IF EXISTS INFORMATION_SCHEMA;
CREATE DATABASE INFORMATION_SCHEMA;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql b/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
index 06d82e0..945bda4 100644
--- a/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
@@ -17,7 +17,6 @@ CREATE TABLE WM_POOL
POOL_ID bigint NOT NULL,
RP_ID bigint NOT NULL,
PATH nvarchar(1024) NOT NULL,
- PARENT_POOL_ID bigint,
ALLOC_FRACTION DOUBLE,
QUERY_PARALLELISM int,
SCHEDULING_POLICY nvarchar(1024)
@@ -26,8 +25,9 @@ CREATE TABLE WM_POOL
ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME");
+
ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
-ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
CREATE TABLE WM_TRIGGER
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
index 70e1267..26c82af 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
@@ -613,7 +613,6 @@ CREATE TABLE WM_POOL
POOL_ID bigint NOT NULL,
RP_ID bigint NOT NULL,
PATH nvarchar(1024) NOT NULL,
- PARENT_POOL_ID bigint,
ALLOC_FRACTION DOUBLE,
QUERY_PARALLELISM int,
SCHEDULING_POLICY nvarchar(1024)
@@ -935,8 +934,6 @@ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_
ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
-ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
-
-- -----------------------------------------------------------------------------------------------------------------------------------------------
-- Transaction and Lock Tables
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql b/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
index cff0b85..34fcfe6 100644
--- a/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
@@ -13,14 +13,12 @@ CREATE TABLE IF NOT EXISTS WM_POOL
`POOL_ID` bigint(20) NOT NULL,
`RP_ID` bigint(20) NOT NULL,
`PATH` varchar(767) NOT NULL,
- `PARENT_POOL_ID` bigint(20),
`ALLOC_FRACTION` DOUBLE,
`QUERY_PARALLELISM` int(11),
`SCHEDULING_POLICY` varchar(767),
PRIMARY KEY (`POOL_ID`),
KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
- CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
- CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+ CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
index 9b66e83..ec95c17 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
@@ -864,14 +864,12 @@ CREATE TABLE IF NOT EXISTS WM_POOL
`POOL_ID` bigint(20) NOT NULL,
`RP_ID` bigint(20) NOT NULL,
`PATH` varchar(767) NOT NULL,
- `PARENT_POOL_ID` bigint(20),
`ALLOC_FRACTION` DOUBLE,
`QUERY_PARALLELISM` int(11),
`SCHEDULING_POLICY` varchar(767),
PRIMARY KEY (`POOL_ID`),
KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
- CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
- CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+ CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql b/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
index ceab459..596bb60 100644
--- a/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
@@ -17,7 +17,6 @@ CREATE TABLE WM_POOL
POOL_ID bigint NOT NULL,
RP_ID bigint NOT NULL,
PATH nvarchar(1024) NOT NULL,
- PARENT_POOL_ID bigint,
ALLOC_FRACTION DOUBLE,
QUERY_PARALLELISM int,
SCHEDULING_POLICY nvarchar(1024)
@@ -27,7 +26,6 @@ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME");
ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
-ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
index de55e70..65c72af 100644
--- a/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
@@ -594,7 +594,6 @@ CREATE TABLE WM_POOL
POOL_ID bigint NOT NULL,
RP_ID bigint NOT NULL,
PATH nvarchar(1024) NOT NULL,
- PARENT_POOL_ID bigint,
ALLOC_FRACTION DOUBLE,
QUERY_PARALLELISM int,
SCHEDULING_POLICY nvarchar(1024)
@@ -878,8 +877,6 @@ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFA
ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
-ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
-
CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql b/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
index 07fb6b7..bd588c4 100644
--- a/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
@@ -17,7 +17,6 @@ CREATE TABLE "WM_POOL" (
"POOL_ID" bigint NOT NULL,
"RP_ID" bigint NOT NULL,
"PATH" character varying(1024) NOT NULL,
- "PARENT_POOL_ID" bigint,
"ALLOC_FRACTION" DOUBLE,
"QUERY_PARALLELISM" integer,
"SCHEDULING_POLICY" character varying(1024)
@@ -31,8 +30,6 @@ ALTER TABLE ONLY "WM_POOL"
ALTER TABLE ONLY "WM_POOL"
ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
-ALTER TABLE ONLY "WM_POOL"
- ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
ALTER TABLE ONLY "WM_RESOURCEPLAN"
ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
index 23626c0..931d3e6 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
@@ -631,7 +631,6 @@ CREATE TABLE "WM_POOL" (
"POOL_ID" bigint NOT NULL,
"RP_ID" bigint NOT NULL,
"PATH" character varying(1024) NOT NULL,
- "PARENT_POOL_ID" bigint,
"ALLOC_FRACTION" DOUBLE,
"QUERY_PARALLELISM" integer,
"SCHEDULING_POLICY" character varying(1024)
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index b5a9b79..4a32704 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2724,4 +2724,59 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
request.setResourcePlanName(resourcePlan);
return client.get_triggers_for_resourceplan(request).getTriggers();
}
+
+ @Override
+ public void createWMPool(WMPool pool)
+ throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
+ WMCreatePoolRequest request = new WMCreatePoolRequest();
+ request.setPool(pool);
+ client.create_wm_pool(request);
+ }
+
+ @Override
+ public void alterWMPool(WMPool pool, String poolPath)
+ throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
+ WMAlterPoolRequest request = new WMAlterPoolRequest();
+ request.setPool(pool);
+ request.setPoolPath(poolPath);
+ client.alter_wm_pool(request);
+ }
+
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, MetaException, TException {
+ WMDropPoolRequest request = new WMDropPoolRequest();
+ request.setResourcePlanName(resourcePlanName);
+ request.setPoolPath(poolPath);
+ client.drop_wm_pool(request);
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate)
+ throws NoSuchObjectException, InvalidObjectException, MetaException, TException {
+ WMCreateOrUpdateMappingRequest request = new WMCreateOrUpdateMappingRequest();
+ request.setMapping(mapping);
+ request.setUpdate(isUpdate);
+ client.create_or_update_wm_mapping(request);
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, MetaException, TException {
+ WMDropMappingRequest request = new WMDropMappingRequest();
+ request.setMapping(mapping);
+ client.drop_wm_mapping(request);
+ }
+
+ @Override
+ public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidObjectException, MetaException, TException {
+ WMCreateOrDropTriggerToPoolMappingRequest request = new WMCreateOrDropTriggerToPoolMappingRequest();
+ request.setResourcePlanName(resourcePlanName);
+ request.setTriggerName(triggerName);
+ request.setPoolPath(poolPath);
+ request.setDrop(shouldDrop);
+ client.create_or_drop_wm_trigger_to_pool_mapping(request);
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 2cb255e..0020136 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -107,6 +107,8 @@ import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
@@ -1802,4 +1804,23 @@ public interface IMetaStoreClient {
List<WMTrigger> getTriggersForResourcePlan(String resourcePlan)
throws NoSuchObjectException, MetaException, TException;
+
+ void createWMPool(WMPool pool)
+ throws NoSuchObjectException, InvalidObjectException, MetaException, TException;
+
+ void alterWMPool(WMPool pool, String poolPath)
+ throws NoSuchObjectException, InvalidObjectException, MetaException, TException;
+
+ void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, MetaException, TException;
+
+ void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate)
+ throws NoSuchObjectException, InvalidObjectException, MetaException, TException;
+
+ void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, MetaException, TException;
+
+ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidObjectException, MetaException, TException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 95aeb25..a0a6e181 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -70,6 +70,8 @@ import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
@@ -942,7 +944,7 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
@Override
public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
- throws AlreadyExistsException, MetaException {
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
objectStore.createResourcePlan(resourcePlan, defaultPoolSize);
}
@@ -1004,4 +1006,48 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
throws NoSuchObjectException, MetaException {
return objectStore.getTriggersForResourcePlan(resourcePlanName);
}
+
+ @Override
+ public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ objectStore.createPool(pool);
+ }
+
+ @Override
+ public void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.alterPool(pool, poolPath);
+ }
+
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.dropWMPool(resourcePlanName, poolPath);
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ objectStore.createOrUpdateWMMapping(mapping, update);
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.dropWMMapping(mapping);
+ }
+
+ @Override
+ public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
+
+ @Override
+ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+ objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 17640f3..4076a9f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -21,6 +21,13 @@ package org.apache.hadoop.hive.ql.exec;
import static org.apache.commons.lang.StringUtils.join;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+import java.util.concurrent.ExecutionException;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
+
import java.io.BufferedWriter;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
@@ -48,7 +55,6 @@ import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
-import java.util.concurrent.ExecutionException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -104,7 +110,6 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.Context;
@@ -177,6 +182,7 @@ import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
import org.apache.hadoop.hive.ql.plan.ColStatistics;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
@@ -190,6 +196,8 @@ import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
+import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc;
import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
import org.apache.hadoop.hive.ql.plan.GrantDesc;
@@ -232,6 +240,8 @@ import org.apache.hadoop.hive.ql.plan.TezWork;
import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils;
import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationTranslator;
@@ -275,11 +285,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.stringtemplate.v4.ST;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ListenableFuture;
-
/**
* DDLTask implementation.
*
@@ -648,10 +653,29 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return dropWMTrigger(db, work.getDropWMTriggerDesc());
}
+ if (work.getWmPoolDesc() != null) {
+ return createOrAlterWMPool(db, work.getWmPoolDesc());
+ }
+
+ if (work.getDropWMPoolDesc() != null) {
+ return dropWMPool(db, work.getDropWMPoolDesc());
+ }
+
+ if (work.getWmMappingDesc() != null) {
+ return createOrAlterWMMapping(db, work.getWmMappingDesc());
+ }
+
+ if (work.getDropWMMappingDesc() != null) {
+ return dropWMMapping(db, work.getDropWMMappingDesc());
+ }
+
+ if (work.getTriggerToPoolMappingDesc() != null) {
+ return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc());
+ }
+
if (work.getAlterMaterializedViewDesc() != null) {
return alterMaterializedView(db, work.getAlterMaterializedViewDesc());
}
-
} catch (Throwable e) {
failed(e);
return 1;
@@ -662,12 +686,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
private int createResourcePlan(Hive db, CreateResourcePlanDesc createResourcePlanDesc)
throws HiveException {
- WMResourcePlan resourcePlan = new WMResourcePlan();
- resourcePlan.setName(createResourcePlanDesc.getName());
- if (createResourcePlanDesc.getQueryParallelism() != null) {
- resourcePlan.setQueryParallelism(createResourcePlanDesc.getQueryParallelism());
- }
- db.createResourcePlan(resourcePlan);
+ db.createResourcePlan(createResourcePlanDesc.getResourcePlan());
return 0;
}
@@ -694,42 +713,29 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException {
if (desc.shouldValidate()) {
- return db.validateResourcePlan(desc.getRpName()) ? 0 : 1;
- }
-
- WMResourcePlan resourcePlan = new WMResourcePlan();
- if (desc.getNewName() != null) {
- resourcePlan.setName(desc.getNewName());
- } else {
- resourcePlan.setName(desc.getRpName());
- }
-
- if (desc.getQueryParallelism() != null) {
- resourcePlan.setQueryParallelism(desc.getQueryParallelism());
- }
-
- if (desc.getDefaultPoolPath() != null) {
- resourcePlan.setDefaultPoolPath(desc.getDefaultPoolPath());
+ return db.validateResourcePlan(desc.getResourcePlanName()) ? 0 : 1;
}
+ WMResourcePlan resourcePlan = desc.getResourcePlan();
final WorkloadManager wm = WorkloadManager.getInstance();
final TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
- if (desc.getStatus() != null) {
- resourcePlan.setStatus(desc.getStatus());
- isActivate = desc.getStatus() == WMResourcePlanStatus.ACTIVE;
+ if (resourcePlan.getStatus() != null) {
+ resourcePlan.setStatus(resourcePlan.getStatus());
+ isActivate = resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE;
}
WMFullResourcePlan appliedRp = db.alterResourcePlan(
- desc.getRpName(), resourcePlan, desc.isEnableActivate());
+ desc.getResourcePlanName(), resourcePlan, desc.isEnableActivate());
if (!isActivate || (wm == null && isInTest) || (pm == null && isInTest)) {
return 0;
}
+
if (appliedRp == null) {
throw new HiveException("Cannot get a resource plan to apply");
// TODO: shut down HS2?
}
- final String name = (desc.getNewName() != null) ? desc.getNewName() : desc.getRpName();
+ final String name = resourcePlan.getName();
LOG.info("Activating a new resource plan " + name + ": " + appliedRp);
if (wm != null) {
// Note: as per our current constraints, the behavior of two parallel activates is
@@ -765,18 +771,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
}
private int createWMTrigger(Hive db, CreateWMTriggerDesc desc) throws HiveException {
- WMTrigger trigger = new WMTrigger(desc.getRpName(), desc.getTriggerName());
- trigger.setTriggerExpression(desc.getTriggerExpression());
- trigger.setActionExpression(desc.getActionExpression());
- db.createWMTrigger(trigger);
+ db.createWMTrigger(desc.getTrigger());
return 0;
}
private int alterWMTrigger(Hive db, AlterWMTriggerDesc desc) throws HiveException {
- WMTrigger trigger = new WMTrigger(desc.getRpName(), desc.getTriggerName());
- trigger.setTriggerExpression(desc.getTriggerExpression());
- trigger.setActionExpression(desc.getActionExpression());
- db.alterWMTrigger(trigger);
+ db.alterWMTrigger(desc.getTrigger());
return 0;
}
@@ -785,6 +785,37 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return 0;
}
+ private int createOrAlterWMPool(Hive db, CreateOrAlterWMPoolDesc desc) throws HiveException {
+ if (desc.isUpdate()) {
+ db.alterWMPool(desc.getPool(), desc.getPoolPath());
+ } else {
+ db.createWMPool(desc.getPool());
+ }
+ return 0;
+ }
+
+ private int dropWMPool(Hive db, DropWMPoolDesc desc) throws HiveException {
+ db.dropWMPool(desc.getResourcePlanName(), desc.getPoolPath());
+ return 0;
+ }
+
+ private int createOrAlterWMMapping(Hive db, CreateOrAlterWMMappingDesc desc) throws HiveException {
+ db.createOrUpdateWMMapping(desc.getMapping(), desc.isUpdate());
+ return 0;
+ }
+
+ private int dropWMMapping(Hive db, DropWMMappingDesc desc) throws HiveException {
+ db.dropWMMapping(desc.getMapping());
+ return 0;
+ }
+
+ private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolMappingDesc desc)
+ throws HiveException {
+ db.createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(),
+ desc.getPoolPath(), desc.shouldDrop());
+ return 0;
+ }
+
private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException {
try{
HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook();
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
index cd232a0..33ee8f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
@@ -99,7 +99,7 @@ class UserPoolMapping {
}
private static void addMapping(WMMapping mapping, Map<String, Mapping> map, String text) {
- Mapping val = new Mapping(mapping.getPoolName(), mapping.getOrdering());
+ Mapping val = new Mapping(mapping.getPoolPath(), mapping.getOrdering());
Mapping oldValue = map.put(mapping.getEntityName(), val);
if (oldValue != null) {
throw new AssertionError("Duplicate mapping for " + text + " " + mapping.getEntityName()
@@ -120,4 +120,4 @@ class UserPoolMapping {
if (mapping != null) return mapping.fullPoolName;
return defaultPoolPath;
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 3e9fff1..1a37bf7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -134,6 +134,8 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
@@ -4812,4 +4814,55 @@ private void constructOneLBLocationMap(FileStatus fSta,
throw new HiveException(e);
}
}
+
+ public void createWMPool(WMPool pool) throws HiveException {
+ try {
+ getMSC().createWMPool(pool);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
+ public void alterWMPool(WMPool pool, String poolPath) throws HiveException {
+ try {
+ getMSC().alterWMPool(pool, poolPath);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
+ public void dropWMPool(String resourcePlanName, String poolPath) throws HiveException {
+ try {
+ getMSC().dropWMPool(resourcePlanName, poolPath);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate)
+ throws HiveException {
+ try {
+ getMSC().createOrUpdateWMMapping(mapping, isUpdate);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
+ public void dropWMMapping(WMMapping mapping) throws HiveException {
+ try {
+ getMSC().dropWMMapping(mapping);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
+
+ public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath, boolean shouldDrop) throws HiveException {
+ try {
+ getMSC().createOrDropTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, shouldDrop);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
};
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 1fd634c..e5e1b53 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -48,7 +48,11 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.QueryState;
@@ -101,6 +105,7 @@ import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.StatsWork;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;
@@ -111,6 +116,8 @@ import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
+import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc;
import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -149,6 +156,8 @@ import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
+import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde.serdeConstants;
@@ -565,10 +574,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
case HiveParser.TOK_CACHE_METADATA:
analyzeCacheMetadata(ast);
break;
- case HiveParser.TOK_CREATERESOURCEPLAN:
+ case HiveParser.TOK_CREATE_RP:
analyzeCreateResourcePlan(ast);
break;
- case HiveParser.TOK_SHOWRESOURCEPLAN:
+ case HiveParser.TOK_SHOW_RP:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowResourcePlan(ast);
break;
@@ -587,6 +596,24 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
case HiveParser.TOK_DROP_TRIGGER:
analyzeDropTrigger(ast);
break;
+ case HiveParser.TOK_CREATE_POOL:
+ analyzeCreatePool(ast);
+ break;
+ case HiveParser.TOK_ALTER_POOL:
+ analyzeAlterPool(ast);
+ break;
+ case HiveParser.TOK_DROP_POOL:
+ analyzeDropPool(ast);
+ break;
+ case HiveParser.TOK_CREATE_MAPPING:
+ analyzeCreateOrAlterMapping(ast, false);
+ break;
+ case HiveParser.TOK_ALTER_MAPPING:
+ analyzeCreateOrAlterMapping(ast, true);
+ break;
+ case HiveParser.TOK_DROP_MAPPING:
+ analyzeDropMapping(ast);
+ break;
default:
throw new SemanticException("Unsupported command: " + ast);
}
@@ -872,11 +899,17 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
}
String resourcePlanName = unescapeIdentifier(ast.getChild(0).getText());
Integer queryParallelism = null;
- if (ast.getChildCount() > 1) {
- queryParallelism = Integer.parseInt(ast.getChild(1).getText());
- }
- if (ast.getChildCount() > 2) {
- throw new SemanticException("Invalid token in CREATE RESOURCE PLAN statement");
+ for (int i = 1; i < ast.getChildCount(); ++i) {
+ Tree child = ast.getChild(i);
+ if (child.getType() == HiveParser.TOK_QUERY_PARALLELISM) {
+ if (queryParallelism == null) {
+ queryParallelism = Integer.parseInt(child.getChild(0).getText());
+ } else {
+ throw new SemanticException("QUERY_PARALLELISM should be set only once.");
+ }
+ } else {
+ throw new SemanticException("Invalid set in create resource plan: " + child.getText());
+ }
}
CreateResourcePlanDesc desc = new CreateResourcePlanDesc(resourcePlanName, queryParallelism);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
@@ -901,71 +934,56 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException("Invalid syntax for ALTER RESOURCE PLAN statement");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
- AlterResourcePlanDesc desc = null;
+ WMResourcePlan resourcePlan = new WMResourcePlan(rpName);
+ boolean isEnableActive = false;
+ boolean validate = false;
for (int i = 1; i < ast.getChildCount(); ++i) {
Tree child = ast.getChild(i);
switch (child.getType()) {
case HiveParser.TOK_VALIDATE:
- if (desc != null) throw new SemanticException("Invalid ALTER VALIDATE command");
- desc = AlterResourcePlanDesc.createValidatePlan(rpName);
+ validate = true;
break;
case HiveParser.TOK_ACTIVATE:
- if (desc == null) {
- desc = AlterResourcePlanDesc.createChangeStatus(rpName, WMResourcePlanStatus.ACTIVE);
- } else if (desc.getStatus() == WMResourcePlanStatus.ENABLED) {
- desc.setIsEnableActivate(true);
- desc.setStatus(WMResourcePlanStatus.ACTIVE);
- } else {
- throw new SemanticException("Invalid ALTER ACTIVATE command");
+ if (resourcePlan.getStatus() == WMResourcePlanStatus.ENABLED) {
+ isEnableActive = true;
}
+ resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE);
break;
case HiveParser.TOK_ENABLE:
- if (desc == null) {
- desc = AlterResourcePlanDesc.createChangeStatus(rpName, WMResourcePlanStatus.ENABLED);
- } else if (desc.getStatus() == WMResourcePlanStatus.ACTIVE) {
- desc.setIsEnableActivate(true);
+ if (resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE) {
+ isEnableActive = true;
} else {
- throw new SemanticException("Invalid ALTER ENABLE command");
+ resourcePlan.setStatus(WMResourcePlanStatus.ENABLED);
}
break;
case HiveParser.TOK_DISABLE:
- if (desc != null) throw new SemanticException("Invalid ALTER DISABLE command");
- desc = AlterResourcePlanDesc.createChangeStatus(rpName, WMResourcePlanStatus.DISABLED);
+ resourcePlan.setStatus(WMResourcePlanStatus.DISABLED);
break;
case HiveParser.TOK_QUERY_PARALLELISM:
if (child.getChildCount() != 1) {
throw new SemanticException("Expected one argument");
}
- if (desc == null) {
- desc = AlterResourcePlanDesc.createSet(rpName);
- }
- desc.setQueryParallelism(Integer.parseInt(child.getChild(0).getText()));
+ resourcePlan.setQueryParallelism(Integer.parseInt(child.getChild(0).getText()));
break;
case HiveParser.TOK_DEFAULT_POOL:
if (child.getChildCount() != 1) {
throw new SemanticException("Expected one argument");
}
- if (desc == null) {
- desc = AlterResourcePlanDesc.createSet(rpName);
- }
- desc.setDefaultPoolPath(child.getChild(0).getText());
+ resourcePlan.setDefaultPoolPath(poolPath(child.getChild(0)));
break;
case HiveParser.TOK_RENAME:
- if (desc != null) throw new SemanticException("Invalid ALTER RENAME command");
if (ast.getChildCount() == (i + 1)) {
throw new SemanticException("Expected an argument");
}
- if (desc == null) {
- desc = AlterResourcePlanDesc.createSet(rpName);
- }
- desc.setNewName(ast.getChild(++i).getText());
+ resourcePlan.setName(unescapeIdentifier(ast.getChild(++i).getText()));
break;
default:
throw new SemanticException(
"Unexpected token in alter resource plan statement: " + child.getType());
}
}
- rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+ new AlterResourcePlanDesc(resourcePlan, rpName, validate, isEnableActive)), conf));
}
private void analyzeDropResourcePlan(ASTNode ast) throws SemanticException {
@@ -987,10 +1005,12 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
String triggerExpression = buildTriggerExpression((ASTNode)ast.getChild(2));
String actionExpression = buildTriggerActionExpression((ASTNode)ast.getChild(3));
- CreateWMTriggerDesc desc =
- new CreateWMTriggerDesc(rpName, triggerName, triggerExpression, actionExpression);
- rootTasks.add(TaskFactory.get(
- new DDLWork(getInputs(), getOutputs(), desc), conf));
+ WMTrigger trigger = new WMTrigger(rpName, triggerName);
+ trigger.setTriggerExpression(triggerExpression);
+ trigger.setActionExpression(actionExpression);
+
+ CreateWMTriggerDesc desc = new CreateWMTriggerDesc(trigger);
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
}
private String buildTriggerExpression(ASTNode ast) throws SemanticException {
@@ -1006,11 +1026,12 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
return builder.toString();
}
- private String poolPath(ASTNode ast) {
+ private String poolPath(Tree ast) {
StringBuilder builder = new StringBuilder();
- builder.append(ast.getText());
+ builder.append(unescapeIdentifier(ast.getText()));
for (int i = 0; i < ast.getChildCount(); ++i) {
- builder.append(ast.getChild(i).getText());
+ // DOT is not affected
+ builder.append(unescapeIdentifier(ast.getChild(i).getText()));
}
return builder.toString();
}
@@ -1023,7 +1044,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
if (ast.getChildCount() != 1) {
throw new SemanticException("Invalid move to clause in trigger action.");
}
- String poolPath = poolPath((ASTNode)ast.getChild(0));
+ String poolPath = poolPath(ast.getChild(0));
return "MOVE TO " + poolPath;
default:
throw new SemanticException("Unknown token in action clause: " + ast.getType());
@@ -1039,24 +1060,145 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
String triggerExpression = buildTriggerExpression((ASTNode)ast.getChild(2));
String actionExpression = buildTriggerActionExpression((ASTNode)ast.getChild(3));
- AlterWMTriggerDesc desc =
- new AlterWMTriggerDesc(rpName, triggerName, triggerExpression, actionExpression);
- rootTasks.add(TaskFactory.get(
- new DDLWork(getInputs(), getOutputs(), desc), conf));
+ WMTrigger trigger = new WMTrigger(rpName, triggerName);
+ trigger.setTriggerExpression(triggerExpression);
+ trigger.setActionExpression(actionExpression);
+
+ AlterWMTriggerDesc desc = new AlterWMTriggerDesc(trigger);
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
}
private void analyzeDropTrigger(ASTNode ast) throws SemanticException {
if (ast.getChildCount() != 2) {
throw new SemanticException("Invalid syntax for drop trigger.");
}
- String rpName = ast.getChild(0).getText();
- String triggerName = ast.getChild(1).getText();
+ String rpName = unescapeIdentifier(ast.getChild(0).getText());
+ String triggerName = unescapeIdentifier(ast.getChild(1).getText());
DropWMTriggerDesc desc = new DropWMTriggerDesc(rpName, triggerName);
rootTasks.add(TaskFactory.get(
new DDLWork(getInputs(), getOutputs(), desc), conf));
}
+ private void analyzeCreatePool(ASTNode ast) throws SemanticException {
+ if (ast.getChildCount() != 5) {
+ throw new SemanticException("Invalid syntax for create pool.");
+ }
+ String rpName = unescapeIdentifier(ast.getChild(0).getText());
+ String poolPath = poolPath(ast.getChild(1));
+ WMPool pool = new WMPool(rpName, poolPath);
+ for (int i = 2; i < ast.getChildCount(); ++i) {
+ Tree child = ast.getChild(i);
+ if (child.getChildCount() != 1) {
+ throw new SemanticException("Expected 1 parameter for: " + child.getText());
+ }
+ String param = child.getChild(0).getText();
+ switch (child.getType()) {
+ case HiveParser.TOK_ALLOC_FRACTION:
+ pool.setAllocFraction(Double.parseDouble(param));
+ break;
+ case HiveParser.TOK_QUERY_PARALLELISM:
+ pool.setQueryParallelism(Integer.parseInt(param));
+ break;
+ case HiveParser.TOK_SCHEDULING_POLICY:
+ pool.setSchedulingPolicy(PlanUtils.stripQuotes(param));
+ break;
+ case HiveParser.TOK_PATH:
+ throw new SemanticException("Invalid parameter path in create pool");
+ }
+ }
+ if (!pool.isSetAllocFraction()) {
+ throw new SemanticException("alloc_fraction should be specified for a pool");
+ }
+ CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(pool, poolPath, false);
+ rootTasks.add(TaskFactory.get(
+ new DDLWork(getInputs(), getOutputs(), desc), conf));
+ }
+
+ private void analyzeAlterPool(ASTNode ast) throws SemanticException {
+ if (ast.getChildCount() < 3) {
+ throw new SemanticException("Invalid syntax for alter pool.");
+ }
+ String rpName = unescapeIdentifier(ast.getChild(0).getText());
+ String poolPath = poolPath(ast.getChild(1));
+ WMPool pool = new WMPool(rpName, poolPath);
+
+ for (int i = 2; i < ast.getChildCount(); ++i) {
+ Tree child = ast.getChild(i);
+ if (child.getChildCount() != 1) {
+ throw new SemanticException("Invalid syntax in alter pool; expected parameter.");
+ }
+ Tree param = child.getChild(0);
+ switch (child.getType()) {
+ case HiveParser.TOK_ALLOC_FRACTION:
+ pool.setAllocFraction(Double.parseDouble(param.getText()));
+ break;
+ case HiveParser.TOK_QUERY_PARALLELISM:
+ pool.setQueryParallelism(Integer.parseInt(param.getText()));
+ break;
+ case HiveParser.TOK_SCHEDULING_POLICY:
+ pool.setSchedulingPolicy(PlanUtils.stripQuotes(param.getText()));
+ break;
+ case HiveParser.TOK_PATH:
+ pool.setPoolPath(poolPath(param));
+ break;
+ case HiveParser.TOK_ADD_TRIGGER:
+ case HiveParser.TOK_DROP_TRIGGER:
+ boolean drop = child.getType() == HiveParser.TOK_DROP_TRIGGER;
+ String triggerName = unescapeIdentifier(param.getText());
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+ new CreateOrDropTriggerToPoolMappingDesc(rpName, triggerName, poolPath, drop)),
+ conf));
+ break;
+ }
+ }
+
+ CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(pool, poolPath, true);
+ rootTasks.add(TaskFactory.get(
+ new DDLWork(getInputs(), getOutputs(), desc), conf));
+ }
+
+ private void analyzeDropPool(ASTNode ast) throws SemanticException {
+ if (ast.getChildCount() != 2) {
+ throw new SemanticException("Invalid syntax for drop pool.");
+ }
+ String rpName = unescapeIdentifier(ast.getChild(0).getText());
+ String poolPath = poolPath(ast.getChild(1));
+
+ DropWMPoolDesc desc = new DropWMPoolDesc(rpName, poolPath);
+ rootTasks.add(TaskFactory.get(
+ new DDLWork(getInputs(), getOutputs(), desc), conf));
+ }
+
+ private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws SemanticException {
+ if (ast.getChildCount() < 4) {
+ throw new SemanticException("Invalid syntax for create or alter mapping.");
+ }
+ String rpName = unescapeIdentifier(ast.getChild(0).getText());
+ String entityType = ast.getChild(1).getText();
+ String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText());
+ WMMapping mapping = new WMMapping(rpName, entityType, entityName);
+ mapping.setPoolPath(poolPath(ast.getChild(3)));
+ if (ast.getChildCount() == 5) {
+ mapping.setOrdering(Integer.valueOf(ast.getChild(4).getText()));
+ }
+
+ CreateOrAlterWMMappingDesc desc = new CreateOrAlterWMMappingDesc(mapping, update);
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
+ }
+
+ private void analyzeDropMapping(ASTNode ast) throws SemanticException {
+ if (ast.getChildCount() != 3) {
+ throw new SemanticException("Invalid syntax for drop mapping.");
+ }
+ String rpName = unescapeIdentifier(ast.getChild(0).getText());
+ String entityType = ast.getChild(1).getText();
+ String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText());
+
+ DropWMMappingDesc desc = new DropWMMappingDesc(new WMMapping(rpName, entityType, entityName));
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
+ }
+
private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
String dbName = unescapeIdentifier(ast.getChild(0).getText());
boolean ifNotExists = false;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index d61fce9..1dcfe9d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -362,6 +362,10 @@ KW_DEFAULT: 'DEFAULT';
KW_POOL: 'POOL';
KW_MOVE: 'MOVE';
KW_DO: 'DO';
+KW_ALLOC_FRACTION: 'ALLOC_FRACTION';
+KW_SCHEDULING_POLICY: 'SCHEDULING_POLICY';
+KW_PATH: 'PATH';
+KW_MAPPING: 'MAPPING';
// Operators
// NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index e99d5fb..1378950 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -24,7 +24,7 @@ ASTLabelType=ASTNode;
backtrack=false;
k=3;
}
-import SelectClauseParser, FromClauseParser, IdentifiersParser;
+import SelectClauseParser, FromClauseParser, IdentifiersParser, ResourcePlanParser;
tokens {
TOK_INSERT;
@@ -409,8 +409,8 @@ TOK_EXPRESSION;
TOK_DETAIL;
TOK_BLOCKING;
TOK_KILL_QUERY;
-TOK_CREATERESOURCEPLAN;
-TOK_SHOWRESOURCEPLAN;
+TOK_CREATE_RP;
+TOK_SHOW_RP;
TOK_ALTER_RP;
TOK_DROP_RP;
TOK_VALIDATE;
@@ -422,6 +422,16 @@ TOK_CREATE_TRIGGER;
TOK_ALTER_TRIGGER;
TOK_DROP_TRIGGER;
TOK_TRIGGER_EXPRESSION;
+TOK_CREATE_POOL;
+TOK_ALTER_POOL;
+TOK_DROP_POOL;
+TOK_ALLOC_FRACTION;
+TOK_SCHEDULING_POLICY;
+TOK_PATH;
+TOK_CREATE_MAPPING;
+TOK_ALTER_MAPPING;
+TOK_DROP_MAPPING;
+TOK_ADD_TRIGGER;
}
@@ -603,6 +613,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
xlateMap.put("KW_POOL", "POOL");
xlateMap.put("KW_MOVE", "MOVE");
xlateMap.put("KW_DO", "DO");
+ xlateMap.put("KW_ALLOC_FRACTION", "ALLOC_FRACTION");
+ xlateMap.put("KW_SCHEDULING_POLICY", "SCHEDULING_POLICY");
+ xlateMap.put("KW_PATH", "PATH");
// Operators
xlateMap.put("DOT", ".");
@@ -941,12 +954,7 @@ ddlStatement
| showCurrentRole
| abortTransactionStatement
| killQueryStatement
- | createResourcePlanStatement
- | alterResourcePlanStatement
- | dropResourcePlanStatement
- | createTriggerStatement
- | alterTriggerStatement
- | dropTriggerStatement
+ | resourcePlanDdlStatements
;
ifExists
@@ -1000,129 +1008,6 @@ orReplace
-> ^(TOK_ORREPLACE)
;
-createResourcePlanStatement
-@init { pushMsg("create resource plan statement", state); }
-@after { popMsg(state); }
- : KW_CREATE KW_RESOURCE KW_PLAN
- name=identifier
- (KW_WITH KW_QUERY_PARALLELISM parallelism=Number)?
- -> ^(TOK_CREATERESOURCEPLAN $name $parallelism?)
- ;
-
-alterRpSet
-@init { pushMsg("alterRpSet", state); }
-@after { popMsg(state); }
- : (
- (KW_QUERY_PARALLELISM EQUAL parallelism=Number -> ^(TOK_QUERY_PARALLELISM $parallelism))
- | (KW_DEFAULT KW_POOL EQUAL poolName=StringLiteral -> ^(TOK_DEFAULT_POOL $poolName))
- )
- ;
-
-alterRpSetList
-@init { pushMsg("alterRpSetList", state); }
-@after { popMsg(state); }
- :
- alterRpSet (COMMA alterRpSet)* -> alterRpSet+
- ;
-
-activate : KW_ACTIVATE -> ^(TOK_ACTIVATE);
-enable : KW_ENABLE -> ^(TOK_ENABLE);
-
-alterResourcePlanStatement
-@init { pushMsg("alter resource plan statement", state); }
-@after { popMsg(state); }
- : KW_ALTER KW_RESOURCE KW_PLAN name=identifier (
- (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE))
- | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE))
- | (KW_SET setList=alterRpSetList -> ^(TOK_ALTER_RP $name $setList))
- | (KW_RENAME KW_TO newName=identifier
- -> ^(TOK_ALTER_RP $name TOK_RENAME $newName))
- | ((activate+ enable? | enable+ activate?) -> ^(TOK_ALTER_RP $name activate? enable?))
- )
- ;
-
-dropResourcePlanStatement
-@init { pushMsg("drop resource plan statement", state); }
-@after { popMsg(state); }
- : KW_DROP KW_RESOURCE KW_PLAN name=identifier
- -> ^(TOK_DROP_RP $name)
- ;
-
-poolPath
-@init { pushMsg("poolPath", state); }
-@after { popMsg(state); }
- : identifier^ (DOT identifier)*
- ;
-
-triggerExpression
-@init { pushMsg("triggerExpression", state); }
-@after { popMsg(state); }
- : triggerOrExpression -> ^(TOK_TRIGGER_EXPRESSION triggerOrExpression)
- ;
-
-triggerOrExpression
-@init { pushMsg("triggerOrExpression", state); }
-@after { popMsg(state); }
- : triggerAndExpression (KW_OR triggerAndExpression)*
- ;
-
-triggerAndExpression
-@init { pushMsg("triggerAndExpression", state); }
-@after { popMsg(state); }
- : triggerAtomExpression (KW_AND triggerAtomExpression)*
- ;
-
-triggerAtomExpression
-@init { pushMsg("triggerAtomExpression", state); }
-@after { popMsg(state); }
- : (identifier comparisionOperator triggerLiteral)
- | (LPAREN triggerOrExpression RPAREN)
- ;
-
-triggerLiteral
-@init { pushMsg("triggerLiteral", state); }
-@after { popMsg(state); }
- : (Number (KW_HOUR|KW_MINUTE|KW_SECOND)?)
- | ByteLengthLiteral
- | StringLiteral
- ;
-
-comparisionOperator
-@init { pushMsg("comparisionOperator", state); }
-@after { popMsg(state); }
- : EQUAL | LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO
- ;
-
-triggerActionExpression
-@init { pushMsg("triggerActionExpression", state); }
-@after { popMsg(state); }
- : KW_KILL
- | (KW_MOVE^ KW_TO! poolPath)
- ;
-
-createTriggerStatement
-@init { pushMsg("create trigger statement", state); }
-@after { popMsg(state); }
- : KW_CREATE KW_TRIGGER rpName=identifier DOT triggerName=identifier
- KW_WHEN triggerExpression KW_DO triggerActionExpression
- -> ^(TOK_CREATE_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression)
- ;
-
-alterTriggerStatement
-@init { pushMsg("alter trigger statement", state); }
-@after { popMsg(state); }
- : KW_ALTER KW_TRIGGER rpName=identifier DOT triggerName=identifier
- KW_WHEN triggerExpression KW_DO triggerActionExpression
- -> ^(TOK_ALTER_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression)
- ;
-
-dropTriggerStatement
-@init { pushMsg("drop trigger statement", state); }
-@after { popMsg(state); }
- : KW_DROP KW_TRIGGER rpName=identifier DOT triggerName=identifier
- -> ^(TOK_DROP_TRIGGER $rpName $triggerName)
- ;
-
createDatabaseStatement
@init { pushMsg("create database statement", state); }
@after { popMsg(state); }
@@ -1775,8 +1660,8 @@ showStatement
| KW_SHOW KW_CONF StringLiteral -> ^(TOK_SHOWCONF StringLiteral)
| KW_SHOW KW_RESOURCE
(
- (KW_PLAN rp_name=identifier -> ^(TOK_SHOWRESOURCEPLAN $rp_name))
- | (KW_PLANS -> ^(TOK_SHOWRESOURCEPLAN))
+ (KW_PLAN rp_name=identifier -> ^(TOK_SHOW_RP $rp_name))
+ | (KW_PLANS -> ^(TOK_SHOW_RP))
)
;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index a0eca4b..f1ca301 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -830,8 +830,8 @@ nonReserved
| KW_ZONE
| KW_TIMESTAMPTZ
| KW_DEFAULT
- | KW_POOL
-
+ | KW_RESOURCE | KW_PLAN | KW_PLANS | KW_QUERY_PARALLELISM | KW_ACTIVATE | KW_MOVE | KW_DO
+ | KW_POOL | KW_ALLOC_FRACTION | KW_SCHEDULING_POLICY | KW_PATH | KW_MAPPING
;
//The following SQL2011 reserved keywords are used as function name only, but not as identifiers.
/**
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
new file mode 100644
index 0000000..95c8725
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
@@ -0,0 +1,230 @@
+/**
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+parser grammar ResourcePlanParser;
+
+options
+{
+ output=AST;
+ ASTLabelType=ASTNode;
+ backtrack=false;
+ k=3;
+}
+
+resourcePlanDdlStatements
+ : createResourcePlanStatement
+ | alterResourcePlanStatement
+ | dropResourcePlanStatement
+ | createTriggerStatement
+ | alterTriggerStatement
+ | dropTriggerStatement
+ | createPoolStatement
+ | alterPoolStatement
+ | dropPoolStatement
+ | createMappingStatement
+ | alterMappingStatement
+ | dropMappingStatement
+ ;
+
+rpAssign
+@init { gParent.pushMsg("rpAssign", state); }
+@after { gParent.popMsg(state); }
+ : (
+ (KW_QUERY_PARALLELISM EQUAL parallelism=Number) -> ^(TOK_QUERY_PARALLELISM $parallelism)
+ | (KW_DEFAULT KW_POOL EQUAL poolPath) -> ^(TOK_DEFAULT_POOL poolPath)
+ )
+ ;
+
+rpAssignList
+@init { gParent.pushMsg("rpAssignList", state); }
+@after { gParent.popMsg(state); }
+ : rpAssign (COMMA rpAssign)* -> rpAssign+
+ ;
+
+createResourcePlanStatement
+@init { gParent.pushMsg("create resource plan statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_CREATE KW_RESOURCE KW_PLAN name=identifier (KW_WITH rpAssignList)?
+ -> ^(TOK_CREATE_RP $name rpAssignList?)
+ ;
+
+activate : KW_ACTIVATE -> ^(TOK_ACTIVATE);
+enable : KW_ENABLE -> ^(TOK_ENABLE);
+
+alterResourcePlanStatement
+@init { gParent.pushMsg("alter resource plan statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_ALTER KW_RESOURCE KW_PLAN name=identifier (
+ (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE))
+ | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE))
+ | (KW_SET rpAssignList -> ^(TOK_ALTER_RP $name rpAssignList))
+ | (KW_RENAME KW_TO newName=identifier -> ^(TOK_ALTER_RP $name TOK_RENAME $newName))
+ | ((activate enable? | enable activate?) -> ^(TOK_ALTER_RP $name activate? enable?))
+ )
+ ;
+
+dropResourcePlanStatement
+@init { gParent.pushMsg("drop resource plan statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_DROP KW_RESOURCE KW_PLAN name=identifier -> ^(TOK_DROP_RP $name)
+ ;
+
+poolPath
+@init { gParent.pushMsg("poolPath", state); }
+@after { gParent.popMsg(state); }
+ : identifier^ (DOT identifier)*
+ ;
+
+triggerExpression
+@init { gParent.pushMsg("triggerExpression", state); }
+@after { gParent.popMsg(state); }
+ : triggerOrExpression -> ^(TOK_TRIGGER_EXPRESSION triggerOrExpression)
+ ;
+
+triggerOrExpression
+@init { gParent.pushMsg("triggerOrExpression", state); }
+@after { gParent.popMsg(state); }
+ : triggerAndExpression (KW_OR triggerAndExpression)*
+ ;
+
+triggerAndExpression
+@init { gParent.pushMsg("triggerAndExpression", state); }
+@after { gParent.popMsg(state); }
+ : triggerAtomExpression (KW_AND triggerAtomExpression)*
+ ;
+
+triggerAtomExpression
+@init { gParent.pushMsg("triggerAtomExpression", state); }
+@after { gParent.popMsg(state); }
+ : (identifier comparisionOperator triggerLiteral)
+ | (LPAREN triggerOrExpression RPAREN)
+ ;
+
+triggerLiteral
+@init { gParent.pushMsg("triggerLiteral", state); }
+@after { gParent.popMsg(state); }
+ : (Number (KW_HOUR|KW_MINUTE|KW_SECOND)?)
+ | ByteLengthLiteral
+ | StringLiteral
+ ;
+
+comparisionOperator
+@init { gParent.pushMsg("comparisionOperator", state); }
+@after { gParent.popMsg(state); }
+ : EQUAL | LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO
+ ;
+
+triggerActionExpression
+@init { gParent.pushMsg("triggerActionExpression", state); }
+@after { gParent.popMsg(state); }
+ : KW_KILL
+ | (KW_MOVE^ KW_TO! poolPath)
+ ;
+
+createTriggerStatement
+@init { gParent.pushMsg("create trigger statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_CREATE KW_TRIGGER rpName=identifier DOT triggerName=identifier
+ KW_WHEN triggerExpression KW_DO triggerActionExpression
+ -> ^(TOK_CREATE_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression)
+ ;
+
+alterTriggerStatement
+@init { gParent.pushMsg("alter trigger statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_ALTER KW_TRIGGER rpName=identifier DOT triggerName=identifier
+ KW_WHEN triggerExpression KW_DO triggerActionExpression
+ -> ^(TOK_ALTER_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression)
+ ;
+
+dropTriggerStatement
+@init { gParent.pushMsg("drop trigger statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_DROP KW_TRIGGER rpName=identifier DOT triggerName=identifier
+ -> ^(TOK_DROP_TRIGGER $rpName $triggerName)
+ ;
+
+poolAssign
+@init { gParent.pushMsg("poolAssign", state); }
+@after { gParent.popMsg(state); }
+ : (
+ (KW_ALLOC_FRACTION EQUAL allocFraction=Number) -> ^(TOK_ALLOC_FRACTION $allocFraction)
+ | (KW_QUERY_PARALLELISM EQUAL parallelism=Number) -> ^(TOK_QUERY_PARALLELISM $parallelism)
+ | (KW_SCHEDULING_POLICY EQUAL policy=StringLiteral) -> ^(TOK_SCHEDULING_POLICY $policy)
+ | (KW_PATH EQUAL path=poolPath) -> ^(TOK_PATH $path)
+ )
+ ;
+
+poolAssignList
+@init { gParent.pushMsg("poolAssignList", state); }
+@after { gParent.popMsg(state); }
+ : poolAssign (COMMA poolAssign)* -> poolAssign+
+ ;
+
+createPoolStatement
+@init { gParent.pushMsg("create pool statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_CREATE KW_POOL rpName=identifier DOT poolPath
+ KW_WITH poolAssignList
+ -> ^(TOK_CREATE_POOL $rpName poolPath poolAssignList)
+ ;
+
+alterPoolStatement
+@init { gParent.pushMsg("alter pool statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_ALTER KW_POOL rpName=identifier DOT poolPath (
+ (KW_SET poolAssignList -> ^(TOK_ALTER_POOL $rpName poolPath poolAssignList))
+ | (KW_ADD KW_TRIGGER triggerName=identifier
+ -> ^(TOK_ALTER_POOL $rpName poolPath ^(TOK_ADD_TRIGGER $triggerName)))
+ | (KW_DROP KW_TRIGGER triggerName=identifier
+ -> ^(TOK_ALTER_POOL $rpName poolPath ^(TOK_DROP_TRIGGER $triggerName)))
+ )
+ ;
+
+dropPoolStatement
+@init { gParent.pushMsg("drop pool statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_DROP KW_POOL rpName=identifier DOT poolPath
+ -> ^(TOK_DROP_POOL $rpName poolPath)
+ ;
+
+createMappingStatement
+@init { gParent.pushMsg("create mapping statement", state); }
+@after { gParent.popMsg(state); }
+ : (KW_CREATE mappingType=(KW_USER | KW_GROUP)
+ KW_MAPPING name=StringLiteral
+ KW_IN rpName=identifier KW_TO poolPath
+ (KW_WITH KW_ORDER order=Number)?)
+ -> ^(TOK_CREATE_MAPPING $rpName $mappingType $name poolPath $order?)
+ ;
+
+alterMappingStatement
+@init { gParent.pushMsg("alter mapping statement", state); }
+@after { gParent.popMsg(state); }
+  : (KW_ALTER mappingType=(KW_USER | KW_GROUP)
+ KW_MAPPING name=StringLiteral
+ KW_IN rpName=identifier KW_TO poolPath
+ (KW_WITH KW_ORDER order=Number)?)
+ -> ^(TOK_ALTER_MAPPING $rpName $mappingType $name poolPath $order?)
+ ;
+
+dropMappingStatement
+@init { gParent.pushMsg("drop mapping statement", state); }
+@after { gParent.popMsg(state); }
+ : KW_DROP mappingType=(KW_USER | KW_GROUP) KW_MAPPING
+ name=StringLiteral KW_IN rpName=identifier
+ -> ^(TOK_DROP_MAPPING $rpName $mappingType $name)
+ ;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index a3b3287..a25e78c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -137,13 +137,19 @@ public final class SemanticAnalyzerFactory {
commandType.put(HiveParser.TOK_REPL_LOAD, HiveOperation.REPLLOAD);
commandType.put(HiveParser.TOK_REPL_STATUS, HiveOperation.REPLSTATUS);
commandType.put(HiveParser.TOK_KILL_QUERY, HiveOperation.KILL_QUERY);
- commandType.put(HiveParser.TOK_CREATERESOURCEPLAN, HiveOperation.CREATE_RESOURCEPLAN);
- commandType.put(HiveParser.TOK_SHOWRESOURCEPLAN, HiveOperation.SHOW_RESOURCEPLAN);
+ commandType.put(HiveParser.TOK_CREATE_RP, HiveOperation.CREATE_RESOURCEPLAN);
+ commandType.put(HiveParser.TOK_SHOW_RP, HiveOperation.SHOW_RESOURCEPLAN);
commandType.put(HiveParser.TOK_ALTER_RP, HiveOperation.ALTER_RESOURCEPLAN);
commandType.put(HiveParser.TOK_DROP_RP, HiveOperation.DROP_RESOURCEPLAN);
commandType.put(HiveParser.TOK_CREATE_TRIGGER, HiveOperation.CREATE_TRIGGER);
commandType.put(HiveParser.TOK_ALTER_TRIGGER, HiveOperation.ALTER_TRIGGER);
commandType.put(HiveParser.TOK_DROP_TRIGGER, HiveOperation.DROP_TRIGGER);
+ commandType.put(HiveParser.TOK_CREATE_POOL, HiveOperation.CREATE_POOL);
+ commandType.put(HiveParser.TOK_ALTER_POOL, HiveOperation.ALTER_POOL);
+ commandType.put(HiveParser.TOK_DROP_POOL, HiveOperation.DROP_POOL);
+ commandType.put(HiveParser.TOK_CREATE_MAPPING, HiveOperation.CREATE_MAPPING);
+ commandType.put(HiveParser.TOK_ALTER_MAPPING, HiveOperation.ALTER_MAPPING);
+ commandType.put(HiveParser.TOK_DROP_MAPPING, HiveOperation.DROP_MAPPING);
}
static {
@@ -337,13 +343,19 @@ public final class SemanticAnalyzerFactory {
case HiveParser.TOK_SHOW_SET_ROLE:
case HiveParser.TOK_CACHE_METADATA:
case HiveParser.TOK_KILL_QUERY:
- case HiveParser.TOK_CREATERESOURCEPLAN:
- case HiveParser.TOK_SHOWRESOURCEPLAN:
+ case HiveParser.TOK_CREATE_RP:
+ case HiveParser.TOK_SHOW_RP:
case HiveParser.TOK_ALTER_RP:
case HiveParser.TOK_DROP_RP:
case HiveParser.TOK_CREATE_TRIGGER:
case HiveParser.TOK_ALTER_TRIGGER:
case HiveParser.TOK_DROP_TRIGGER:
+ case HiveParser.TOK_CREATE_POOL:
+ case HiveParser.TOK_ALTER_POOL:
+ case HiveParser.TOK_DROP_POOL:
+ case HiveParser.TOK_CREATE_MAPPING:
+ case HiveParser.TOK_ALTER_MAPPING:
+ case HiveParser.TOK_DROP_MAPPING:
return new DDLSemanticAnalyzer(queryState);
case HiveParser.TOK_CREATEFUNCTION:
[02/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 7d7d28d..68d6c9c 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -14763,7 +14763,7 @@ class WMMapping:
- resourcePlanName
- entityType
- entityName
- - poolName
+ - poolPath
- ordering
"""
@@ -14772,15 +14772,15 @@ class WMMapping:
(1, TType.STRING, 'resourcePlanName', None, None, ), # 1
(2, TType.STRING, 'entityType', None, None, ), # 2
(3, TType.STRING, 'entityName', None, None, ), # 3
- (4, TType.STRING, 'poolName', None, None, ), # 4
+ (4, TType.STRING, 'poolPath', None, None, ), # 4
(5, TType.I32, 'ordering', None, None, ), # 5
)
- def __init__(self, resourcePlanName=None, entityType=None, entityName=None, poolName=None, ordering=None,):
+ def __init__(self, resourcePlanName=None, entityType=None, entityName=None, poolPath=None, ordering=None,):
self.resourcePlanName = resourcePlanName
self.entityType = entityType
self.entityName = entityName
- self.poolName = poolName
+ self.poolPath = poolPath
self.ordering = ordering
def read(self, iprot):
@@ -14809,7 +14809,7 @@ class WMMapping:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
- self.poolName = iprot.readString()
+ self.poolPath = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
@@ -14839,9 +14839,9 @@ class WMMapping:
oprot.writeFieldBegin('entityName', TType.STRING, 3)
oprot.writeString(self.entityName)
oprot.writeFieldEnd()
- if self.poolName is not None:
- oprot.writeFieldBegin('poolName', TType.STRING, 4)
- oprot.writeString(self.poolName)
+ if self.poolPath is not None:
+ oprot.writeFieldBegin('poolPath', TType.STRING, 4)
+ oprot.writeString(self.poolPath)
oprot.writeFieldEnd()
if self.ordering is not None:
oprot.writeFieldBegin('ordering', TType.I32, 5)
@@ -14865,7 +14865,7 @@ class WMMapping:
value = (value * 31) ^ hash(self.resourcePlanName)
value = (value * 31) ^ hash(self.entityType)
value = (value * 31) ^ hash(self.entityName)
- value = (value * 31) ^ hash(self.poolName)
+ value = (value * 31) ^ hash(self.poolPath)
value = (value * 31) ^ hash(self.ordering)
return value
@@ -16481,6 +16481,754 @@ class WMGetTriggersForResourePlanResponse:
def __ne__(self, other):
return not (self == other)
+class WMCreatePoolRequest:
+ """
+ Attributes:
+ - pool
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'pool', (WMPool, WMPool.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, pool=None,):
+ self.pool = pool
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.pool = WMPool()
+ self.pool.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMCreatePoolRequest')
+ if self.pool is not None:
+ oprot.writeFieldBegin('pool', TType.STRUCT, 1)
+ self.pool.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.pool)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMCreatePoolResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMCreatePoolResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMAlterPoolRequest:
+ """
+ Attributes:
+ - pool
+ - poolPath
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'pool', (WMPool, WMPool.thrift_spec), None, ), # 1
+ (2, TType.STRING, 'poolPath', None, None, ), # 2
+ )
+
+ def __init__(self, pool=None, poolPath=None,):
+ self.pool = pool
+ self.poolPath = poolPath
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.pool = WMPool()
+ self.pool.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.poolPath = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMAlterPoolRequest')
+ if self.pool is not None:
+ oprot.writeFieldBegin('pool', TType.STRUCT, 1)
+ self.pool.write(oprot)
+ oprot.writeFieldEnd()
+ if self.poolPath is not None:
+ oprot.writeFieldBegin('poolPath', TType.STRING, 2)
+ oprot.writeString(self.poolPath)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.pool)
+ value = (value * 31) ^ hash(self.poolPath)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMAlterPoolResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMAlterPoolResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMDropPoolRequest:
+ """
+ Attributes:
+ - resourcePlanName
+ - poolPath
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'resourcePlanName', None, None, ), # 1
+ (2, TType.STRING, 'poolPath', None, None, ), # 2
+ )
+
+ def __init__(self, resourcePlanName=None, poolPath=None,):
+ self.resourcePlanName = resourcePlanName
+ self.poolPath = poolPath
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.resourcePlanName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.poolPath = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMDropPoolRequest')
+ if self.resourcePlanName is not None:
+ oprot.writeFieldBegin('resourcePlanName', TType.STRING, 1)
+ oprot.writeString(self.resourcePlanName)
+ oprot.writeFieldEnd()
+ if self.poolPath is not None:
+ oprot.writeFieldBegin('poolPath', TType.STRING, 2)
+ oprot.writeString(self.poolPath)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.resourcePlanName)
+ value = (value * 31) ^ hash(self.poolPath)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMDropPoolResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMDropPoolResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMCreateOrUpdateMappingRequest:
+ """
+ Attributes:
+ - mapping
+ - update
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'mapping', (WMMapping, WMMapping.thrift_spec), None, ), # 1
+ (2, TType.BOOL, 'update', None, None, ), # 2
+ )
+
+ def __init__(self, mapping=None, update=None,):
+ self.mapping = mapping
+ self.update = update
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.mapping = WMMapping()
+ self.mapping.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.BOOL:
+ self.update = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMCreateOrUpdateMappingRequest')
+ if self.mapping is not None:
+ oprot.writeFieldBegin('mapping', TType.STRUCT, 1)
+ self.mapping.write(oprot)
+ oprot.writeFieldEnd()
+ if self.update is not None:
+ oprot.writeFieldBegin('update', TType.BOOL, 2)
+ oprot.writeBool(self.update)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.mapping)
+ value = (value * 31) ^ hash(self.update)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMCreateOrUpdateMappingResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMCreateOrUpdateMappingResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMDropMappingRequest:
+ """
+ Attributes:
+ - mapping
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'mapping', (WMMapping, WMMapping.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, mapping=None,):
+ self.mapping = mapping
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.mapping = WMMapping()
+ self.mapping.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMDropMappingRequest')
+ if self.mapping is not None:
+ oprot.writeFieldBegin('mapping', TType.STRUCT, 1)
+ self.mapping.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.mapping)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMDropMappingResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMDropMappingResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMCreateOrDropTriggerToPoolMappingRequest:
+ """
+ Attributes:
+ - resourcePlanName
+ - triggerName
+ - poolPath
+ - drop
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'resourcePlanName', None, None, ), # 1
+ (2, TType.STRING, 'triggerName', None, None, ), # 2
+ (3, TType.STRING, 'poolPath', None, None, ), # 3
+ (4, TType.BOOL, 'drop', None, None, ), # 4
+ )
+
+ def __init__(self, resourcePlanName=None, triggerName=None, poolPath=None, drop=None,):
+ self.resourcePlanName = resourcePlanName
+ self.triggerName = triggerName
+ self.poolPath = poolPath
+ self.drop = drop
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.resourcePlanName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.triggerName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.poolPath = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.BOOL:
+ self.drop = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMCreateOrDropTriggerToPoolMappingRequest')
+ if self.resourcePlanName is not None:
+ oprot.writeFieldBegin('resourcePlanName', TType.STRING, 1)
+ oprot.writeString(self.resourcePlanName)
+ oprot.writeFieldEnd()
+ if self.triggerName is not None:
+ oprot.writeFieldBegin('triggerName', TType.STRING, 2)
+ oprot.writeString(self.triggerName)
+ oprot.writeFieldEnd()
+ if self.poolPath is not None:
+ oprot.writeFieldBegin('poolPath', TType.STRING, 3)
+ oprot.writeString(self.poolPath)
+ oprot.writeFieldEnd()
+ if self.drop is not None:
+ oprot.writeFieldBegin('drop', TType.BOOL, 4)
+ oprot.writeBool(self.drop)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.resourcePlanName)
+ value = (value * 31) ^ hash(self.triggerName)
+ value = (value * 31) ^ hash(self.poolPath)
+ value = (value * 31) ^ hash(self.drop)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class WMCreateOrDropTriggerToPoolMappingResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('WMCreateOrDropTriggerToPoolMappingResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class MetaException(TException):
"""
Attributes:
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 192f881..cc57c85 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -3334,14 +3334,14 @@ class WMMapping
RESOURCEPLANNAME = 1
ENTITYTYPE = 2
ENTITYNAME = 3
- POOLNAME = 4
+ POOLPATH = 4
ORDERING = 5
FIELDS = {
RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName'},
ENTITYTYPE => {:type => ::Thrift::Types::STRING, :name => 'entityType'},
ENTITYNAME => {:type => ::Thrift::Types::STRING, :name => 'entityName'},
- POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true},
+ POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true},
ORDERING => {:type => ::Thrift::Types::I32, :name => 'ordering', :optional => true}
}
@@ -3753,6 +3753,204 @@ class WMGetTriggersForResourePlanResponse
::Thrift::Struct.generate_accessors self
end
+class WMCreatePoolRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ POOL = 1
+
+ FIELDS = {
+ POOL => {:type => ::Thrift::Types::STRUCT, :name => 'pool', :class => ::WMPool, :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMCreatePoolResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMAlterPoolRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ POOL = 1
+ POOLPATH = 2
+
+ FIELDS = {
+ POOL => {:type => ::Thrift::Types::STRUCT, :name => 'pool', :class => ::WMPool, :optional => true},
+ POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMAlterPoolResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMDropPoolRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RESOURCEPLANNAME = 1
+ POOLPATH = 2
+
+ FIELDS = {
+ RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName', :optional => true},
+ POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMDropPoolResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMCreateOrUpdateMappingRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ MAPPING = 1
+ UPDATE = 2
+
+ FIELDS = {
+ MAPPING => {:type => ::Thrift::Types::STRUCT, :name => 'mapping', :class => ::WMMapping, :optional => true},
+ UPDATE => {:type => ::Thrift::Types::BOOL, :name => 'update', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMCreateOrUpdateMappingResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMDropMappingRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ MAPPING = 1
+
+ FIELDS = {
+ MAPPING => {:type => ::Thrift::Types::STRUCT, :name => 'mapping', :class => ::WMMapping, :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMDropMappingResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMCreateOrDropTriggerToPoolMappingRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RESOURCEPLANNAME = 1
+ TRIGGERNAME = 2
+ POOLPATH = 3
+ DROP = 4
+
+ FIELDS = {
+ RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName', :optional => true},
+ TRIGGERNAME => {:type => ::Thrift::Types::STRING, :name => 'triggerName', :optional => true},
+ POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true},
+ DROP => {:type => ::Thrift::Types::BOOL, :name => 'drop', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class WMCreateOrDropTriggerToPoolMappingResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
class MetaException < ::Thrift::Exception
include ::Thrift::Struct, ::Thrift::Struct_Union
def initialize(message=nil)
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 9fd7045..182cc37 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -2885,6 +2885,118 @@ module ThriftHiveMetastore
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_triggers_for_resourceplan failed: unknown result')
end
+ def create_wm_pool(request)
+ send_create_wm_pool(request)
+ return recv_create_wm_pool()
+ end
+
+ def send_create_wm_pool(request)
+ send_message('create_wm_pool', Create_wm_pool_args, :request => request)
+ end
+
+ def recv_create_wm_pool()
+ result = receive_message(Create_wm_pool_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise result.o4 unless result.o4.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_wm_pool failed: unknown result')
+ end
+
+ def alter_wm_pool(request)
+ send_alter_wm_pool(request)
+ return recv_alter_wm_pool()
+ end
+
+ def send_alter_wm_pool(request)
+ send_message('alter_wm_pool', Alter_wm_pool_args, :request => request)
+ end
+
+ def recv_alter_wm_pool()
+ result = receive_message(Alter_wm_pool_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise result.o4 unless result.o4.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_wm_pool failed: unknown result')
+ end
+
+ def drop_wm_pool(request)
+ send_drop_wm_pool(request)
+ return recv_drop_wm_pool()
+ end
+
+ def send_drop_wm_pool(request)
+ send_message('drop_wm_pool', Drop_wm_pool_args, :request => request)
+ end
+
+ def recv_drop_wm_pool()
+ result = receive_message(Drop_wm_pool_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_wm_pool failed: unknown result')
+ end
+
+ def create_or_update_wm_mapping(request)
+ send_create_or_update_wm_mapping(request)
+ return recv_create_or_update_wm_mapping()
+ end
+
+ def send_create_or_update_wm_mapping(request)
+ send_message('create_or_update_wm_mapping', Create_or_update_wm_mapping_args, :request => request)
+ end
+
+ def recv_create_or_update_wm_mapping()
+ result = receive_message(Create_or_update_wm_mapping_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise result.o4 unless result.o4.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_or_update_wm_mapping failed: unknown result')
+ end
+
+ def drop_wm_mapping(request)
+ send_drop_wm_mapping(request)
+ return recv_drop_wm_mapping()
+ end
+
+ def send_drop_wm_mapping(request)
+ send_message('drop_wm_mapping', Drop_wm_mapping_args, :request => request)
+ end
+
+ def recv_drop_wm_mapping()
+ result = receive_message(Drop_wm_mapping_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_wm_mapping failed: unknown result')
+ end
+
+ def create_or_drop_wm_trigger_to_pool_mapping(request)
+ send_create_or_drop_wm_trigger_to_pool_mapping(request)
+ return recv_create_or_drop_wm_trigger_to_pool_mapping()
+ end
+
+ def send_create_or_drop_wm_trigger_to_pool_mapping(request)
+ send_message('create_or_drop_wm_trigger_to_pool_mapping', Create_or_drop_wm_trigger_to_pool_mapping_args, :request => request)
+ end
+
+ def recv_create_or_drop_wm_trigger_to_pool_mapping()
+ result = receive_message(Create_or_drop_wm_trigger_to_pool_mapping_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise result.o4 unless result.o4.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_or_drop_wm_trigger_to_pool_mapping failed: unknown result')
+ end
+
end
class Processor < ::FacebookService::Processor
@@ -5029,6 +5141,104 @@ module ThriftHiveMetastore
write_result(result, oprot, 'get_triggers_for_resourceplan', seqid)
end
+ def process_create_wm_pool(seqid, iprot, oprot)
+ args = read_args(iprot, Create_wm_pool_args)
+ result = Create_wm_pool_result.new()
+ begin
+ result.success = @handler.create_wm_pool(args.request)
+ rescue ::AlreadyExistsException => o1
+ result.o1 = o1
+ rescue ::NoSuchObjectException => o2
+ result.o2 = o2
+ rescue ::InvalidObjectException => o3
+ result.o3 = o3
+ rescue ::MetaException => o4
+ result.o4 = o4
+ end
+ write_result(result, oprot, 'create_wm_pool', seqid)
+ end
+
+ def process_alter_wm_pool(seqid, iprot, oprot)
+ args = read_args(iprot, Alter_wm_pool_args)
+ result = Alter_wm_pool_result.new()
+ begin
+ result.success = @handler.alter_wm_pool(args.request)
+ rescue ::AlreadyExistsException => o1
+ result.o1 = o1
+ rescue ::NoSuchObjectException => o2
+ result.o2 = o2
+ rescue ::InvalidObjectException => o3
+ result.o3 = o3
+ rescue ::MetaException => o4
+ result.o4 = o4
+ end
+ write_result(result, oprot, 'alter_wm_pool', seqid)
+ end
+
+ def process_drop_wm_pool(seqid, iprot, oprot)
+ args = read_args(iprot, Drop_wm_pool_args)
+ result = Drop_wm_pool_result.new()
+ begin
+ result.success = @handler.drop_wm_pool(args.request)
+ rescue ::NoSuchObjectException => o1
+ result.o1 = o1
+ rescue ::InvalidOperationException => o2
+ result.o2 = o2
+ rescue ::MetaException => o3
+ result.o3 = o3
+ end
+ write_result(result, oprot, 'drop_wm_pool', seqid)
+ end
+
+ def process_create_or_update_wm_mapping(seqid, iprot, oprot)
+ args = read_args(iprot, Create_or_update_wm_mapping_args)
+ result = Create_or_update_wm_mapping_result.new()
+ begin
+ result.success = @handler.create_or_update_wm_mapping(args.request)
+ rescue ::AlreadyExistsException => o1
+ result.o1 = o1
+ rescue ::NoSuchObjectException => o2
+ result.o2 = o2
+ rescue ::InvalidObjectException => o3
+ result.o3 = o3
+ rescue ::MetaException => o4
+ result.o4 = o4
+ end
+ write_result(result, oprot, 'create_or_update_wm_mapping', seqid)
+ end
+
+ def process_drop_wm_mapping(seqid, iprot, oprot)
+ args = read_args(iprot, Drop_wm_mapping_args)
+ result = Drop_wm_mapping_result.new()
+ begin
+ result.success = @handler.drop_wm_mapping(args.request)
+ rescue ::NoSuchObjectException => o1
+ result.o1 = o1
+ rescue ::InvalidOperationException => o2
+ result.o2 = o2
+ rescue ::MetaException => o3
+ result.o3 = o3
+ end
+ write_result(result, oprot, 'drop_wm_mapping', seqid)
+ end
+
+ def process_create_or_drop_wm_trigger_to_pool_mapping(seqid, iprot, oprot)
+ args = read_args(iprot, Create_or_drop_wm_trigger_to_pool_mapping_args)
+ result = Create_or_drop_wm_trigger_to_pool_mapping_result.new()
+ begin
+ result.success = @handler.create_or_drop_wm_trigger_to_pool_mapping(args.request)
+ rescue ::AlreadyExistsException => o1
+ result.o1 = o1
+ rescue ::NoSuchObjectException => o2
+ result.o2 = o2
+ rescue ::InvalidObjectException => o3
+ result.o3 = o3
+ rescue ::MetaException => o4
+ result.o4 = o4
+ end
+ write_result(result, oprot, 'create_or_drop_wm_trigger_to_pool_mapping', seqid)
+ end
+
end
# HELPER FUNCTIONS AND STRUCTURES
@@ -11490,5 +11700,241 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Create_wm_pool_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQUEST = 1
+
+ FIELDS = {
+ REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMCreatePoolRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Create_wm_pool_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+ O4 = 4
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMCreatePoolResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+ O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Alter_wm_pool_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQUEST = 1
+
+ FIELDS = {
+ REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMAlterPoolRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Alter_wm_pool_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+ O4 = 4
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMAlterPoolResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+ O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Drop_wm_pool_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQUEST = 1
+
+ FIELDS = {
+ REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMDropPoolRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Drop_wm_pool_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMDropPoolResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Create_or_update_wm_mapping_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQUEST = 1
+
+ FIELDS = {
+ REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMCreateOrUpdateMappingRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Create_or_update_wm_mapping_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+ O4 = 4
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMCreateOrUpdateMappingResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+ O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Drop_wm_mapping_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQUEST = 1
+
+ FIELDS = {
+ REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMDropMappingRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Drop_wm_mapping_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMDropMappingResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Create_or_drop_wm_trigger_to_pool_mapping_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQUEST = 1
+
+ FIELDS = {
+ REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMCreateOrDropTriggerToPoolMappingRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Create_or_drop_wm_trigger_to_pool_mapping_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+ O4 = 4
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMCreateOrDropTriggerToPoolMappingResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+ O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
end
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 7a636aa..78efe38 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -7395,7 +7395,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
getMS().dropWMTrigger(request.getResourcePlanName(), request.getTriggerName());
return new WMDropTriggerResponse();
} catch (MetaException e) {
- LOG.error("Exception while trying to retrieve resource plans", e);
+ LOG.error("Exception while trying to drop trigger.", e);
throw e;
}
}
@@ -7414,6 +7414,88 @@ public class HiveMetaStore extends ThriftHiveMetastore {
throw e;
}
}
+
+ @Override
+ public WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest request)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException,
+ TException {
+ try {
+ getMS().createPool(request.getPool());
+ return new WMCreatePoolResponse();
+ } catch (MetaException e) {
+ LOG.error("Exception while trying to create WMPool", e);
+ throw e;
+ }
+ }
+
+ @Override
+ public WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest request)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException,
+ TException {
+ try {
+ getMS().alterPool(request.getPool(), request.getPoolPath());
+ return new WMAlterPoolResponse();
+ } catch (MetaException e) {
+ LOG.error("Exception while trying to alter WMPool", e);
+ throw e;
+ }
+ }
+
+ @Override
+ public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request)
+ throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+ try {
+ getMS().dropWMPool(request.getResourcePlanName(), request.getPoolPath());
+ return new WMDropPoolResponse();
+ } catch (MetaException e) {
+ LOG.error("Exception while trying to drop WMPool", e);
+ throw e;
+ }
+ }
+
+ @Override
+ public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(
+ WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidObjectException, MetaException, TException {
+ try {
+ getMS().createOrUpdateWMMapping(request.getMapping(), request.isUpdate());
+ return new WMCreateOrUpdateMappingResponse();
+ } catch (MetaException e) {
+ LOG.error("Exception while trying to create or update WMMapping", e);
+ throw e;
+ }
+ }
+
+ @Override
+ public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request)
+ throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+ try {
+ getMS().dropWMMapping(request.getMapping());
+ return new WMDropMappingResponse();
+ } catch (MetaException e) {
+ LOG.error("Exception while trying to drop WMMapping", e);
+ throw e;
+ }
+ }
+
+ @Override
+ public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(
+ WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidObjectException, MetaException, TException {
+ try {
+ if (request.isDrop()) {
+ getMS().dropWMTriggerToPoolMapping(
+ request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath());
+ } else {
+ getMS().createWMTriggerToPoolMapping(
+ request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath());
+ }
+ return new WMCreateOrDropTriggerToPoolMappingResponse();
+ } catch (MetaException e) {
+ LOG.error("Exception while trying to create or drop pool mappings", e);
+ throw e;
+ }
+ }
}
private static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, Configuration conf)
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 0dc8c39..63cb52e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -25,6 +25,7 @@ import com.google.common.collect.Sets;
import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.model.MWMMapping;
+import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.model.MWMPool;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
@@ -9528,20 +9529,26 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
- throws AlreadyExistsException, MetaException {
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
boolean commited = false;
String rpName = normalizeIdentifier(resourcePlan.getName());
Integer queryParallelism = resourcePlan.isSetQueryParallelism() ?
resourcePlan.getQueryParallelism() : null;
MWMResourcePlan rp = new MWMResourcePlan(
rpName, queryParallelism, MWMResourcePlan.Status.DISABLED);
+ if (rpName.isEmpty()) {
+ throw new InvalidObjectException("Resource name cannot be empty.");
+ }
+ if (queryParallelism != null && queryParallelism <= 0) {
+ throw new InvalidObjectException("Query parallelism should be positive.");
+ }
try {
openTransaction();
pm.makePersistent(rp);
// TODO: ideally, this should be moved outside to HiveMetaStore to be shared between
// all the RawStore-s. Right now there's no method to create a pool.
if (defaultPoolSize > 0) {
- MWMPool defaultPool = new MWMPool(rp, "default", null, 1.0, defaultPoolSize, null);
+ MWMPool defaultPool = new MWMPool(rp, "default", 1.0, defaultPoolSize, null);
pm.makePersistent(defaultPool);
rp.setPools(Sets.newHashSet(defaultPool));
rp.setDefaultPool(defaultPool);
@@ -9609,7 +9616,7 @@ public class ObjectStore implements RawStore, Configurable {
WMMapping result = new WMMapping(rpName,
mMapping.getEntityType().toString(), mMapping.getEntityName());
if (mMapping.getPool() != null) {
- result.setPoolName(mMapping.getPool().getPath());
+ result.setPoolPath(mMapping.getPool().getPath());
}
if (mMapping.getOrdering() != null) {
result.setOrdering(mMapping.getOrdering());
@@ -9619,10 +9626,16 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
- return fromMResourcePlan(getMWMResourcePlan(name));
+ try {
+ return fromMResourcePlan(getMWMResourcePlan(name, false));
+ } catch (InvalidOperationException e) {
+ // Should not happen, edit check is false.
+ throw new RuntimeException(e);
+ }
}
- public MWMResourcePlan getMWMResourcePlan(String name) throws NoSuchObjectException {
+ private MWMResourcePlan getMWMResourcePlan(String name, boolean editCheck)
+ throws NoSuchObjectException, InvalidOperationException {
MWMResourcePlan resourcePlan;
boolean commited = false;
Query query = null;
@@ -9642,6 +9655,9 @@ public class ObjectStore implements RawStore, Configurable {
if (resourcePlan == null) {
throw new NoSuchObjectException("There is no resource plan named: " + name);
}
+ if (editCheck && resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) {
+ throw new InvalidOperationException("Resource plan must be disabled to edit it.");
+ }
return resourcePlan;
}
@@ -9681,31 +9697,32 @@ public class ObjectStore implements RawStore, Configurable {
WMFullResourcePlan result = null;
try {
openTransaction();
- query = pm.newQuery(MWMResourcePlan.class, "name == rpName");
- query.declareParameters("java.lang.String rpName");
- query.setUnique(true);
- MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(name);
- if (mResourcePlan == null) {
- throw new NoSuchObjectException("Cannot find resource plan: " + name);
+ MWMResourcePlan mResourcePlan = getMWMResourcePlan(name, !resourcePlan.isSetStatus());
+ if (resourcePlan.isSetStatus() && (resourcePlan.isSetQueryParallelism() ||
+ resourcePlan.isSetDefaultPoolPath() || !resourcePlan.getName().equals(name))) {
+ throw new InvalidOperationException("Cannot change values during status switch.");
}
if (!resourcePlan.getName().equals(name)) {
+ String newName = normalizeIdentifier(resourcePlan.getName());
+ if (newName.isEmpty()) {
+ throw new InvalidOperationException("Cannot rename to empty value.");
+ }
mResourcePlan.setName(resourcePlan.getName());
}
if (resourcePlan.isSetQueryParallelism()) {
+ if (resourcePlan.getQueryParallelism() <= 0) {
+ throw new InvalidOperationException("queryParallelism should be positive.");
+ }
mResourcePlan.setQueryParallelism(resourcePlan.getQueryParallelism());
}
+ if (resourcePlan.isSetDefaultPoolPath()) {
+ MWMPool pool = getPool(mResourcePlan, resourcePlan.getDefaultPoolPath());
+ mResourcePlan.setDefaultPool(pool);
+ }
if (resourcePlan.isSetStatus()) {
result = switchStatus(
name, mResourcePlan, resourcePlan.getStatus().name(), canActivateDisabled);
}
- if (resourcePlan.isSetDefaultPoolPath()) {
- MWMPool pool = getPoolByPath(resourcePlan, resourcePlan.getDefaultPoolPath());
- if (pool == null) {
- throw new NoSuchObjectException(
- "Cannot find pool: " + resourcePlan.getDefaultPoolPath());
- }
- mResourcePlan.setDefaultPool(pool);
- }
commited = commitTransaction();
return result;
} catch (Exception e) {
@@ -9716,25 +9733,6 @@ public class ObjectStore implements RawStore, Configurable {
}
}
-
- private MWMPool getPoolByPath(WMResourcePlan parent, String path) {
- // Note: this doesn't do recursion because we will do that on create/alter.
- boolean commited = false;
- Query query = null;
- try {
- openTransaction();
- query = pm.newQuery(MWMPool.class, "path == pname and resourcePlan == rp");
- query.declareParameters("java.lang.String pname, MWMResourcePlan rp");
- query.setUnique(true);
- MWMPool pool = (MWMPool) query.execute(path, parent);
- pm.retrieve(pool);
- commited = commitTransaction();
- return pool;
- } finally {
- rollbackAndCleanup(commited, query);
- }
- }
-
@Override
public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
boolean commited = false;
@@ -9893,11 +9891,7 @@ public class ObjectStore implements RawStore, Configurable {
boolean commited = false;
try {
openTransaction();
- MWMResourcePlan resourcePlan = getMWMResourcePlan(
- normalizeIdentifier(trigger.getResourcePlanName()));
- if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) {
- throw new InvalidOperationException("Resource plan must be disabled to edit it.");
- }
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(trigger.getResourcePlanName(), true);
MWMTrigger mTrigger = new MWMTrigger(resourcePlan,
normalizeIdentifier(trigger.getTriggerName()), trigger.getTriggerExpression(),
trigger.getActionExpression(), null);
@@ -9918,23 +9912,35 @@ public class ObjectStore implements RawStore, Configurable {
Query query = null;
try {
openTransaction();
- MWMResourcePlan resourcePlan = getMWMResourcePlan(
- normalizeIdentifier(trigger.getResourcePlanName()));
- if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) {
- throw new InvalidOperationException("Resource plan must be disabled to edit it.");
- }
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(trigger.getResourcePlanName(), true);
+ MWMTrigger mTrigger = getTrigger(resourcePlan, trigger.getTriggerName());
+ // Update the object.
+ mTrigger.setTriggerExpression(trigger.getTriggerExpression());
+ mTrigger.setActionExpression(trigger.getActionExpression());
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+ private MWMTrigger getTrigger(MWMResourcePlan resourcePlan, String triggerName)
+ throws NoSuchObjectException {
+ triggerName = normalizeIdentifier(triggerName);
+ boolean commited = false;
+ Query query = null;
+ try {
+ openTransaction();
// Get the MWMTrigger object from DN
query = pm.newQuery(MWMTrigger.class, "resourcePlan == rp && name == triggerName");
query.declareParameters("MWMResourcePlan rp, java.lang.String triggerName");
query.setUnique(true);
- MWMTrigger mTrigger = (MWMTrigger) query.execute(resourcePlan, trigger.getTriggerName());
+ MWMTrigger mTrigger = (MWMTrigger) query.execute(resourcePlan, triggerName);
+ if (mTrigger == null) {
+ throw new NoSuchObjectException("Cannot find trigger with name: " + triggerName);
+ }
pm.retrieve(mTrigger);
-
- // Update the object.
- mTrigger.setTriggerExpression(trigger.getTriggerExpression());
- mTrigger.setActionExpression(mTrigger.getActionExpression());
commited = commitTransaction();
+ return mTrigger;
} finally {
rollbackAndCleanup(commited, query);
}
@@ -9950,13 +9956,10 @@ public class ObjectStore implements RawStore, Configurable {
Query query = null;
try {
openTransaction();
- MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName);
- if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) {
- throw new InvalidOperationException("Resource plan must be disabled to edit it.");
- }
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName, true);
query = pm.newQuery(MWMTrigger.class, "resourcePlan == rp && name == triggerName");
query.declareParameters("MWMResourcePlan rp, java.lang.String triggerName");
- if (query.deletePersistentAll(resourcePlan, triggerName) == 0) {
+ if (query.deletePersistentAll(resourcePlan, triggerName) != 1) {
throw new NoSuchObjectException("Cannot delete trigger: " + triggerName);
}
commited = commitTransaction();
@@ -9973,7 +9976,13 @@ public class ObjectStore implements RawStore, Configurable {
Query query = null;
try {
openTransaction();
- MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName);
+ MWMResourcePlan resourcePlan;
+ try {
+ resourcePlan = getMWMResourcePlan(resourcePlanName, false);
+ } catch (InvalidOperationException e) {
+ // Should not happen, edit check is false.
+ throw new RuntimeException(e);
+ }
query = pm.newQuery(MWMTrigger.class, "resourcePlan == rp");
query.declareParameters("MWMResourcePlan rp");
List<MWMTrigger> mTriggers = (List<MWMTrigger>) query.execute(resourcePlan);
@@ -9998,4 +10007,249 @@ public class ObjectStore implements RawStore, Configurable {
trigger.setActionExpression(mTrigger.getActionExpression());
return trigger;
}
+
+ @Override
+ public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ boolean commited = false;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(pool.getResourcePlanName(), true);
+
+ if (!poolParentExists(resourcePlan, pool.getPoolPath())) {
+ throw new NoSuchObjectException("Pool path is invalid, the parent does not exist");
+ }
+ MWMPool mPool = new MWMPool(resourcePlan, pool.getPoolPath(), pool.getAllocFraction(),
+ pool.getQueryParallelism(), pool.getSchedulingPolicy());
+ pm.makePersistent(mPool);
+ commited = commitTransaction();
+ } catch (Exception e) {
+ checkForConstraintException(e, "Pool already exists: ");
+ throw e;
+ } finally {
+ rollbackAndCleanup(commited, (Query)null);
+ }
+ }
+
+ @Override
+ public void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException {
+ boolean commited = false;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(pool.getResourcePlanName(), true);
+ MWMPool mPool = getPool(resourcePlan, poolPath);
+ pm.retrieve(mPool);
+ if (pool.isSetAllocFraction()) {
+ mPool.setAllocFraction(pool.getAllocFraction());
+ }
+ if (pool.isSetQueryParallelism()) {
+ mPool.setQueryParallelism(pool.getQueryParallelism());
+ }
+ if (pool.isSetSchedulingPolicy()) {
+ mPool.setSchedulingPolicy(pool.getSchedulingPolicy());
+ }
+ if (pool.isSetPoolPath() && !pool.getPoolPath().equals(mPool.getPath())) {
+ moveDescendents(resourcePlan, mPool.getPath(), pool.getPoolPath());
+ mPool.setPath(pool.getPoolPath());
+ }
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, (Query)null);
+ }
+ }
+
+ private MWMPool getPool(MWMResourcePlan resourcePlan, String poolPath)
+ throws NoSuchObjectException {
+ poolPath = normalizeIdentifier(poolPath);
+ boolean commited = false;
+ Query query = null;
+ try {
+ openTransaction();
+ query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path == poolPath");
+ query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath");
+ query.setUnique(true);
+ MWMPool mPool = (MWMPool) query.execute(resourcePlan, poolPath);
+ commited = commitTransaction();
+ if (mPool == null) {
+ throw new NoSuchObjectException("Cannot find pool: " + poolPath);
+ }
+ pm.retrieve(mPool);
+ return mPool;
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+
+ private void moveDescendents(MWMResourcePlan resourcePlan, String path, String newPoolPath)
+ throws NoSuchObjectException {
+ if (!poolParentExists(resourcePlan, newPoolPath)) {
+ throw new NoSuchObjectException("Pool path is invalid, the parent does not exist");
+ }
+ boolean commited = false;
+ Query query = null;
+ openTransaction();
+ try {
+ query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path.startsWith(poolPath)");
+ query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath");
+ List<MWMPool> descPools = (List<MWMPool>) query.execute(resourcePlan, path + ".");
+ pm.retrieveAll(descPools);
+ for (MWMPool pool : descPools) {
+ pool.setPath(newPoolPath + pool.getPath().substring(path.length()));
+ }
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+
+ private boolean poolParentExists(MWMResourcePlan resourcePlan, String poolPath) {
+ int idx = poolPath.lastIndexOf('.');
+ if (idx == -1) {
+ return true;
+ }
+ String parent = poolPath.substring(0, idx);
+ try {
+ getPool(resourcePlan, parent);
+ return true;
+ } catch (NoSuchObjectException e) {
+ return false;
+ }
+ }
+
+ // Drops a single WM pool identified by (resourcePlanName, poolPath).
+ // Preconditions enforced below: the owning resource plan must be DISABLED (editCheck),
+ // the pool must not be the plan's default pool, and it must have no child pools.
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ poolPath = normalizeIdentifier(poolPath);
+ boolean commited = false;
+ Query query = null;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName, true);
+ if (resourcePlan.getDefaultPool() != null &&
+ resourcePlan.getDefaultPool().getPath().equals(poolPath)) {
+ throw new InvalidOperationException("Cannot drop default pool of a resource plan");
+ }
+ if (poolHasChildren(resourcePlan, poolPath)) {
+ throw new InvalidOperationException("Pool has children cannot drop.");
+ }
+ // Match the path exactly. A startsWith(poolPath) filter would also match sibling
+ // pools whose path shares this string prefix (e.g. dropping "a.b" would hit "a.bc"),
+ // deleting them and then failing the count check after the damage is queued.
+ query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path == poolPath");
+ query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath");
+ if (query.deletePersistentAll(resourcePlan, poolPath) != 1) {
+ throw new NoSuchObjectException("Cannot delete pool: " + poolPath);
+ }
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+
+ private boolean poolHasChildren(MWMResourcePlan resourcePlan, String poolPath) {
+ boolean commited = false;
+ Query query = null;
+ try {
+ openTransaction();
+ query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path.startsWith(poolPath)");
+ query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath");
+ query.setResult("count(this)");
+ query.setUnique(true);
+ Long count = (Long) query.execute(resourcePlan, poolPath + ".");
+ commited = commitTransaction();
+ return count != null && count > 0;
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ EntityType entityType = EntityType.valueOf(mapping.getEntityType().trim().toUpperCase());
+ String entityName = normalizeIdentifier(mapping.getEntityName());
+ boolean commited = false;
+ Query query = null;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(mapping.getResourcePlanName(), true);
+ MWMPool pool = getPool(resourcePlan, mapping.getPoolPath());
+ if (!update) {
+ MWMMapping mMapping = new MWMMapping(resourcePlan, entityType, entityName, pool,
+ mapping.getOrdering());
+ pm.makePersistent(mMapping);
+ } else {
+ query = pm.newQuery(MWMPool.class, "resourcePlan == rp && entityType == type " +
+ "&& entityName == name");
+ query.declareParameters(
+ "MWMResourcePlan rp, java.lang.String type, java.lang.String name");
+ query.setUnique(true);
+ MWMMapping mMapping = (MWMMapping) query.execute(resourcePlan, entityType, entityName);
+ mMapping.setPool(pool);
+ }
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ String entityType = mapping.getEntityType().trim().toUpperCase();
+ String entityName = normalizeIdentifier(mapping.getEntityName());
+ boolean commited = false;
+ Query query = null;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(mapping.getResourcePlanName(), true);
+ query = pm.newQuery(MWMMapping.class,
+ "resourcePlan == rp && entityType == type && entityName == name");
+ query.declareParameters("MWMResourcePlan rp, java.lang.String type, java.lang.String name");
+ if (query.deletePersistentAll(resourcePlan, entityType, entityName) != 1) {
+ throw new NoSuchObjectException("Cannot delete mapping.");
+ }
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, query);
+ }
+ }
+
+ @Override
+ public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ boolean commited = false;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName, true);
+ MWMPool pool = getPool(resourcePlan, poolPath);
+ MWMTrigger trigger = getTrigger(resourcePlan, triggerName);
+ pool.getTriggers().add(trigger);
+ trigger.getPools().add(pool);
+ pm.makePersistent(pool);
+ pm.makePersistent(trigger);
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, (Query)null);
+ }
+ }
+
+ @Override
+ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+ boolean commited = false;
+ try {
+ openTransaction();
+ MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName, true);
+ MWMPool pool = getPool(resourcePlan, poolPath);
+ MWMTrigger trigger = getTrigger(resourcePlan, triggerName);
+ pool.getTriggers().remove(trigger);
+ trigger.getPools().remove(pool);
+ pm.makePersistent(pool);
+ pm.makePersistent(trigger);
+ commited = commitTransaction();
+ } finally {
+ rollbackAndCleanup(commited, (Query)null);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index db148a1..dcc626c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -72,6 +72,8 @@ import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
@@ -752,7 +754,7 @@ public interface RawStore extends Configurable {
String getMetastoreDbUuid() throws MetaException;
void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
- throws AlreadyExistsException, MetaException;
+ throws AlreadyExistsException, MetaException, InvalidObjectException;
WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
@@ -782,4 +784,27 @@ public interface RawStore extends Configurable {
List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
throws NoSuchObjectException, MetaException;
+
+ void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException;
+
+ void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException;
+
+ void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException;
+
+ void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException;
+
+ void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException;
+
+ void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException;
+
+ void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 2eb967b..1f6d900 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -91,6 +91,8 @@ import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
@@ -2382,7 +2384,7 @@ public class CachedStore implements RawStore, Configurable {
@Override
public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
- throws AlreadyExistsException, MetaException {
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
rawStore.createResourcePlan(resourcePlan, defaultPoolSize);
}
@@ -2445,6 +2447,50 @@ public class CachedStore implements RawStore, Configurable {
return rawStore.getTriggersForResourcePlan(resourcePlanName);
}
+ @Override
+ public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ rawStore.createPool(pool);
+ }
+
+ @Override
+ public void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException {
+ rawStore.alterPool(pool, poolPath);
+ }
+
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ rawStore.dropWMPool(resourcePlanName, poolPath);
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ rawStore.createOrUpdateWMMapping(mapping, update);
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ rawStore.dropWMMapping(mapping);
+ }
+
+ @Override
+ public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ rawStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
+
+ @Override
+ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+ rawStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
+
static boolean isNotInBlackList(String dbName, String tblName) {
String str = dbName + "." + tblName;
for (Pattern pattern : blacklistPatterns) {
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
index e00a020..01f2fe3 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
@@ -25,14 +25,13 @@ public class MWMPool {
private String path;
private Double allocFraction;
private Integer queryParallelism;
- private Set<MWMTrigger> triggers;
private String schedulingPolicy;
- private MWMPool parentPool;
+ private Set<MWMTrigger> triggers;
public MWMPool() {}
- public MWMPool(MWMResourcePlan resourcePlan, String path, MWMPool parentPool,
- Double allocFraction, Integer queryParallelism, String schedulingPolicy) {
+ public MWMPool(MWMResourcePlan resourcePlan, String path, Double allocFraction,
+ Integer queryParallelism, String schedulingPolicy) {
this.resourcePlan = resourcePlan;
this.path = path;
this.allocFraction = allocFraction;
@@ -72,14 +71,6 @@ public class MWMPool {
this.queryParallelism = queryParallelism;
}
- public Set<MWMTrigger> getTriggers() {
- return triggers;
- }
-
- public void setTriggers(Set<MWMTrigger> triggers) {
- this.triggers = triggers;
- }
-
public String getSchedulingPolicy() {
return schedulingPolicy;
}
@@ -88,11 +79,11 @@ public class MWMPool {
this.schedulingPolicy = schedulingPolicy;
}
- public MWMPool getParentPool() {
- return parentPool;
+ public Set<MWMTrigger> getTriggers() {
+ return triggers;
}
- public void setParentPool(MWMPool parentPool) {
- this.parentPool = parentPool;
+ public void setTriggers(Set<MWMTrigger> triggers) {
+ this.triggers = triggers;
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/package.jdo b/standalone-metastore/src/main/resources/package.jdo
index 3242630..57e75f8 100644
--- a/standalone-metastore/src/main/resources/package.jdo
+++ b/standalone-metastore/src/main/resources/package.jdo
@@ -1123,19 +1123,17 @@
<field name="path">
<column name="PATH" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
</field>
- <field name="parentPool">
- <column name="PARENT_POOL_ID" jdbc-type="integer" allows-null="true" target="POOL_ID"/>
- </field>
<field name="allocFraction">
<column name="ALLOC_FRACTION" jdbc-type="double" allows-null="true"/>
</field>
- <field name="schedulingPolicy">
- <column name="SCHEDULING_POLICY" jdbc-type="VARCHAR" allows-null="true"/>
- </field>
<field name="queryParallelism">
<column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
</field>
+ <field name="schedulingPolicy">
+ <column name="SCHEDULING_POLICY" jdbc-type="VARCHAR" allows-null="true"/>
+ </field>
<field name="triggers" table="WM_POOL_TO_TRIGGER">
+ <collection element-type="MWMTrigger" />
<join>
<column name="POOL_ID" />
</join>
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 4832a6f..3319b1e 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1065,7 +1065,7 @@ struct WMMapping {
1: required string resourcePlanName;
2: required string entityType;
3: required string entityName;
- 4: optional string poolName;
+ 4: optional string poolPath;
5: optional i32 ordering;
}
@@ -1168,6 +1168,53 @@ struct WMGetTriggersForResourePlanResponse {
1: optional list<WMTrigger> triggers;
}
+struct WMCreatePoolRequest {
+ 1: optional WMPool pool;
+}
+
+struct WMCreatePoolResponse {
+}
+
+struct WMAlterPoolRequest {
+ 1: optional WMPool pool;
+ 2: optional string poolPath;
+}
+
+struct WMAlterPoolResponse {
+}
+
+struct WMDropPoolRequest {
+ 1: optional string resourcePlanName;
+ 2: optional string poolPath;
+}
+
+struct WMDropPoolResponse {
+}
+
+struct WMCreateOrUpdateMappingRequest {
+ 1: optional WMMapping mapping;
+ 2: optional bool update;
+}
+
+struct WMCreateOrUpdateMappingResponse {
+}
+
+struct WMDropMappingRequest {
+ 1: optional WMMapping mapping;
+}
+
+struct WMDropMappingResponse {
+}
+
+struct WMCreateOrDropTriggerToPoolMappingRequest {
+ 1: optional string resourcePlanName;
+ 2: optional string triggerName;
+ 3: optional string poolPath;
+ 4: optional bool drop;
+}
+
+struct WMCreateOrDropTriggerToPoolMappingResponse {
+}
// Exceptions.
@@ -1767,6 +1814,24 @@ service ThriftHiveMetastore extends fb303.FacebookService
WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(1:WMGetTriggersForResourePlanRequest request)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+ WMCreatePoolResponse create_wm_pool(1:WMCreatePoolRequest request)
+ throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+
+ WMAlterPoolResponse alter_wm_pool(1:WMAlterPoolRequest request)
+ throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+
+ WMDropPoolResponse drop_wm_pool(1:WMDropPoolRequest request)
+ throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+
+ WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(1:WMCreateOrUpdateMappingRequest request)
+ throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+
+ WMDropMappingResponse drop_wm_mapping(1:WMDropMappingRequest request)
+ throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+
+ WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(1:WMCreateOrDropTriggerToPoolMappingRequest request)
+ throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
}
// * Note about the DDL_TIME: When creating or altering a table or a partition,
[07/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management APIs (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolRequest.java
new file mode 100644
index 0000000..2ea6cf0
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolRequest.java
@@ -0,0 +1,504 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMAlterPoolRequest implements org.apache.thrift.TBase<WMAlterPoolRequest, WMAlterPoolRequest._Fields>, java.io.Serializable, Cloneable, Comparable<WMAlterPoolRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMAlterPoolRequest");
+
+ private static final org.apache.thrift.protocol.TField POOL_FIELD_DESC = new org.apache.thrift.protocol.TField("pool", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMAlterPoolRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMAlterPoolRequestTupleSchemeFactory());
+ }
+
+ private WMPool pool; // optional
+ private String poolPath; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ POOL((short)1, "pool"),
+ POOL_PATH((short)2, "poolPath");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // POOL
+ return POOL;
+ case 2: // POOL_PATH
+ return POOL_PATH;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final _Fields optionals[] = {_Fields.POOL,_Fields.POOL_PATH};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.POOL, new org.apache.thrift.meta_data.FieldMetaData("pool", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMPool.class)));
+ tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMAlterPoolRequest.class, metaDataMap);
+ }
+
+ public WMAlterPoolRequest() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMAlterPoolRequest(WMAlterPoolRequest other) {
+ if (other.isSetPool()) {
+ this.pool = new WMPool(other.pool);
+ }
+ if (other.isSetPoolPath()) {
+ this.poolPath = other.poolPath;
+ }
+ }
+
+ public WMAlterPoolRequest deepCopy() {
+ return new WMAlterPoolRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.pool = null;
+ this.poolPath = null;
+ }
+
+ public WMPool getPool() {
+ return this.pool;
+ }
+
+ public void setPool(WMPool pool) {
+ this.pool = pool;
+ }
+
+ public void unsetPool() {
+ this.pool = null;
+ }
+
+ /** Returns true if field pool is set (has been assigned a value) and false otherwise */
+ public boolean isSetPool() {
+ return this.pool != null;
+ }
+
+ public void setPoolIsSet(boolean value) {
+ if (!value) {
+ this.pool = null;
+ }
+ }
+
+ public String getPoolPath() {
+ return this.poolPath;
+ }
+
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
+ }
+
+ public void unsetPoolPath() {
+ this.poolPath = null;
+ }
+
+ /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */
+ public boolean isSetPoolPath() {
+ return this.poolPath != null;
+ }
+
+ public void setPoolPathIsSet(boolean value) {
+ if (!value) {
+ this.poolPath = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case POOL:
+ if (value == null) {
+ unsetPool();
+ } else {
+ setPool((WMPool)value);
+ }
+ break;
+
+ case POOL_PATH:
+ if (value == null) {
+ unsetPoolPath();
+ } else {
+ setPoolPath((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case POOL:
+ return getPool();
+
+ case POOL_PATH:
+ return getPoolPath();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case POOL:
+ return isSetPool();
+ case POOL_PATH:
+ return isSetPoolPath();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMAlterPoolRequest)
+ return this.equals((WMAlterPoolRequest)that);
+ return false;
+ }
+
+ public boolean equals(WMAlterPoolRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_pool = true && this.isSetPool();
+ boolean that_present_pool = true && that.isSetPool();
+ if (this_present_pool || that_present_pool) {
+ if (!(this_present_pool && that_present_pool))
+ return false;
+ if (!this.pool.equals(that.pool))
+ return false;
+ }
+
+ boolean this_present_poolPath = true && this.isSetPoolPath();
+ boolean that_present_poolPath = true && that.isSetPoolPath();
+ if (this_present_poolPath || that_present_poolPath) {
+ if (!(this_present_poolPath && that_present_poolPath))
+ return false;
+ if (!this.poolPath.equals(that.poolPath))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_pool = true && (isSetPool());
+ list.add(present_pool);
+ if (present_pool)
+ list.add(pool);
+
+ boolean present_poolPath = true && (isSetPoolPath());
+ list.add(present_poolPath);
+ if (present_poolPath)
+ list.add(poolPath);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMAlterPoolRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetPool()).compareTo(other.isSetPool());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPool()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pool, other.pool);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPoolPath()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMAlterPoolRequest(");
+ boolean first = true;
+
+ if (isSetPool()) {
+ sb.append("pool:");
+ if (this.pool == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.pool);
+ }
+ first = false;
+ }
+ if (isSetPoolPath()) {
+ if (!first) sb.append(", ");
+ sb.append("poolPath:");
+ if (this.poolPath == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.poolPath);
+ }
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (pool != null) {
+ pool.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMAlterPoolRequestStandardSchemeFactory implements SchemeFactory {
+ public WMAlterPoolRequestStandardScheme getScheme() {
+ return new WMAlterPoolRequestStandardScheme();
+ }
+ }
+
+ private static class WMAlterPoolRequestStandardScheme extends StandardScheme<WMAlterPoolRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMAlterPoolRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // POOL
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.pool = new WMPool();
+ struct.pool.read(iprot);
+ struct.setPoolIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // POOL_PATH
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMAlterPoolRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.pool != null) {
+ if (struct.isSetPool()) {
+ oprot.writeFieldBegin(POOL_FIELD_DESC);
+ struct.pool.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.poolPath != null) {
+ if (struct.isSetPoolPath()) {
+ oprot.writeFieldBegin(POOL_PATH_FIELD_DESC);
+ oprot.writeString(struct.poolPath);
+ oprot.writeFieldEnd();
+ }
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMAlterPoolRequestTupleSchemeFactory implements SchemeFactory {
+ public WMAlterPoolRequestTupleScheme getScheme() {
+ return new WMAlterPoolRequestTupleScheme();
+ }
+ }
+
+ private static class WMAlterPoolRequestTupleScheme extends TupleScheme<WMAlterPoolRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMAlterPoolRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetPool()) {
+ optionals.set(0);
+ }
+ if (struct.isSetPoolPath()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2);
+ if (struct.isSetPool()) {
+ struct.pool.write(oprot);
+ }
+ if (struct.isSetPoolPath()) {
+ oprot.writeString(struct.poolPath);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMAlterPoolRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(2);
+ if (incoming.get(0)) {
+ struct.pool = new WMPool();
+ struct.pool.read(iprot);
+ struct.setPoolIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolResponse.java
new file mode 100644
index 0000000..1d9283a
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMAlterPoolResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMAlterPoolResponse implements org.apache.thrift.TBase<WMAlterPoolResponse, WMAlterPoolResponse._Fields>, java.io.Serializable, Cloneable, Comparable<WMAlterPoolResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMAlterPoolResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMAlterPoolResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMAlterPoolResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMAlterPoolResponse.class, metaDataMap);
+ }
+
+ public WMAlterPoolResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMAlterPoolResponse(WMAlterPoolResponse other) {
+ }
+
+ public WMAlterPoolResponse deepCopy() {
+ return new WMAlterPoolResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMAlterPoolResponse)
+ return this.equals((WMAlterPoolResponse)that);
+ return false;
+ }
+
+ public boolean equals(WMAlterPoolResponse that) {
+ if (that == null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMAlterPoolResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMAlterPoolResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMAlterPoolResponseStandardSchemeFactory implements SchemeFactory {
+ public WMAlterPoolResponseStandardScheme getScheme() {
+ return new WMAlterPoolResponseStandardScheme();
+ }
+ }
+
+ private static class WMAlterPoolResponseStandardScheme extends StandardScheme<WMAlterPoolResponse> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMAlterPoolResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMAlterPoolResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMAlterPoolResponseTupleSchemeFactory implements SchemeFactory {
+ public WMAlterPoolResponseTupleScheme getScheme() {
+ return new WMAlterPoolResponseTupleScheme();
+ }
+ }
+
+ private static class WMAlterPoolResponseTupleScheme extends TupleScheme<WMAlterPoolResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMAlterPoolResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMAlterPoolResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java
new file mode 100644
index 0000000..f807be9
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java
@@ -0,0 +1,708 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+// NOTE(review): Thrift-compiler (0.9.3) generated bean — do not hand-edit; regenerate
+// from the metastore .thrift IDL. Request payload for creating or dropping a
+// trigger-to-pool mapping inside a workload-management resource plan.
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrDropTriggerToPoolMappingRequest implements org.apache.thrift.TBase<WMCreateOrDropTriggerToPoolMappingRequest, WMCreateOrDropTriggerToPoolMappingRequest._Fields>, java.io.Serializable, Cloneable, Comparable<WMCreateOrDropTriggerToPoolMappingRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrDropTriggerToPoolMappingRequest");
+
+ private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField TRIGGER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("triggerName", org.apache.thrift.protocol.TType.STRING, (short)2);
+ private static final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)3);
+ private static final org.apache.thrift.protocol.TField DROP_FIELD_DESC = new org.apache.thrift.protocol.TField("drop", org.apache.thrift.protocol.TType.BOOL, (short)4);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMCreateOrDropTriggerToPoolMappingRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMCreateOrDropTriggerToPoolMappingRequestTupleSchemeFactory());
+ }
+
+ // All four fields are OPTIONAL in the IDL (see metaDataMap below). 'drop'
+ // presumably selects drop (true) vs. create (false) of the mapping —
+ // TODO(review): confirm against the HiveMetaStore handler for this request.
+ private String resourcePlanName; // optional
+ private String triggerName; // optional
+ private String poolPath; // optional
+ private boolean drop; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ RESOURCE_PLAN_NAME((short)1, "resourcePlanName"),
+ TRIGGER_NAME((short)2, "triggerName"),
+ POOL_PATH((short)3, "poolPath"),
+ DROP((short)4, "drop");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // RESOURCE_PLAN_NAME
+ return RESOURCE_PLAN_NAME;
+ case 2: // TRIGGER_NAME
+ return TRIGGER_NAME;
+ case 3: // POOL_PATH
+ return POOL_PATH;
+ case 4: // DROP
+ return DROP;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final int __DROP_ISSET_ID = 0;
+ // Bit 0 of this bitfield tracks whether the primitive 'drop' field has been
+ // assigned (object fields use null-ness instead; see isSetDrop / isSetPoolPath).
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.RESOURCE_PLAN_NAME,_Fields.TRIGGER_NAME,_Fields.POOL_PATH,_Fields.DROP};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TRIGGER_NAME, new org.apache.thrift.meta_data.FieldMetaData("triggerName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.DROP, new org.apache.thrift.meta_data.FieldMetaData("drop", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrDropTriggerToPoolMappingRequest.class, metaDataMap);
+ }
+
+ public WMCreateOrDropTriggerToPoolMappingRequest() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMCreateOrDropTriggerToPoolMappingRequest(WMCreateOrDropTriggerToPoolMappingRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
+ if (other.isSetResourcePlanName()) {
+ this.resourcePlanName = other.resourcePlanName;
+ }
+ if (other.isSetTriggerName()) {
+ this.triggerName = other.triggerName;
+ }
+ if (other.isSetPoolPath()) {
+ this.poolPath = other.poolPath;
+ }
+ this.drop = other.drop;
+ }
+
+ public WMCreateOrDropTriggerToPoolMappingRequest deepCopy() {
+ return new WMCreateOrDropTriggerToPoolMappingRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.resourcePlanName = null;
+ this.triggerName = null;
+ this.poolPath = null;
+ setDropIsSet(false);
+ this.drop = false;
+ }
+
+ public String getResourcePlanName() {
+ return this.resourcePlanName;
+ }
+
+ public void setResourcePlanName(String resourcePlanName) {
+ this.resourcePlanName = resourcePlanName;
+ }
+
+ public void unsetResourcePlanName() {
+ this.resourcePlanName = null;
+ }
+
+ /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */
+ public boolean isSetResourcePlanName() {
+ return this.resourcePlanName != null;
+ }
+
+ public void setResourcePlanNameIsSet(boolean value) {
+ if (!value) {
+ this.resourcePlanName = null;
+ }
+ }
+
+ public String getTriggerName() {
+ return this.triggerName;
+ }
+
+ public void setTriggerName(String triggerName) {
+ this.triggerName = triggerName;
+ }
+
+ public void unsetTriggerName() {
+ this.triggerName = null;
+ }
+
+ /** Returns true if field triggerName is set (has been assigned a value) and false otherwise */
+ public boolean isSetTriggerName() {
+ return this.triggerName != null;
+ }
+
+ public void setTriggerNameIsSet(boolean value) {
+ if (!value) {
+ this.triggerName = null;
+ }
+ }
+
+ public String getPoolPath() {
+ return this.poolPath;
+ }
+
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
+ }
+
+ public void unsetPoolPath() {
+ this.poolPath = null;
+ }
+
+ /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */
+ public boolean isSetPoolPath() {
+ return this.poolPath != null;
+ }
+
+ public void setPoolPathIsSet(boolean value) {
+ if (!value) {
+ this.poolPath = null;
+ }
+ }
+
+ public boolean isDrop() {
+ return this.drop;
+ }
+
+ public void setDrop(boolean drop) {
+ this.drop = drop;
+ setDropIsSet(true);
+ }
+
+ public void unsetDrop() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DROP_ISSET_ID);
+ }
+
+ /** Returns true if field drop is set (has been assigned a value) and false otherwise */
+ public boolean isSetDrop() {
+ return EncodingUtils.testBit(__isset_bitfield, __DROP_ISSET_ID);
+ }
+
+ public void setDropIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DROP_ISSET_ID, value);
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case RESOURCE_PLAN_NAME:
+ if (value == null) {
+ unsetResourcePlanName();
+ } else {
+ setResourcePlanName((String)value);
+ }
+ break;
+
+ case TRIGGER_NAME:
+ if (value == null) {
+ unsetTriggerName();
+ } else {
+ setTriggerName((String)value);
+ }
+ break;
+
+ case POOL_PATH:
+ if (value == null) {
+ unsetPoolPath();
+ } else {
+ setPoolPath((String)value);
+ }
+ break;
+
+ case DROP:
+ if (value == null) {
+ unsetDrop();
+ } else {
+ setDrop((Boolean)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case RESOURCE_PLAN_NAME:
+ return getResourcePlanName();
+
+ case TRIGGER_NAME:
+ return getTriggerName();
+
+ case POOL_PATH:
+ return getPoolPath();
+
+ case DROP:
+ return isDrop();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case RESOURCE_PLAN_NAME:
+ return isSetResourcePlanName();
+ case TRIGGER_NAME:
+ return isSetTriggerName();
+ case POOL_PATH:
+ return isSetPoolPath();
+ case DROP:
+ return isSetDrop();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMCreateOrDropTriggerToPoolMappingRequest)
+ return this.equals((WMCreateOrDropTriggerToPoolMappingRequest)that);
+ return false;
+ }
+
+ // Field-by-field equality: two instances are equal when each field has the
+ // same set-ness and, when set, the same value.
+ public boolean equals(WMCreateOrDropTriggerToPoolMappingRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_resourcePlanName = true && this.isSetResourcePlanName();
+ boolean that_present_resourcePlanName = true && that.isSetResourcePlanName();
+ if (this_present_resourcePlanName || that_present_resourcePlanName) {
+ if (!(this_present_resourcePlanName && that_present_resourcePlanName))
+ return false;
+ if (!this.resourcePlanName.equals(that.resourcePlanName))
+ return false;
+ }
+
+ boolean this_present_triggerName = true && this.isSetTriggerName();
+ boolean that_present_triggerName = true && that.isSetTriggerName();
+ if (this_present_triggerName || that_present_triggerName) {
+ if (!(this_present_triggerName && that_present_triggerName))
+ return false;
+ if (!this.triggerName.equals(that.triggerName))
+ return false;
+ }
+
+ boolean this_present_poolPath = true && this.isSetPoolPath();
+ boolean that_present_poolPath = true && that.isSetPoolPath();
+ if (this_present_poolPath || that_present_poolPath) {
+ if (!(this_present_poolPath && that_present_poolPath))
+ return false;
+ if (!this.poolPath.equals(that.poolPath))
+ return false;
+ }
+
+ boolean this_present_drop = true && this.isSetDrop();
+ boolean that_present_drop = true && that.isSetDrop();
+ if (this_present_drop || that_present_drop) {
+ if (!(this_present_drop && that_present_drop))
+ return false;
+ if (this.drop != that.drop)
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_resourcePlanName = true && (isSetResourcePlanName());
+ list.add(present_resourcePlanName);
+ if (present_resourcePlanName)
+ list.add(resourcePlanName);
+
+ boolean present_triggerName = true && (isSetTriggerName());
+ list.add(present_triggerName);
+ if (present_triggerName)
+ list.add(triggerName);
+
+ boolean present_poolPath = true && (isSetPoolPath());
+ list.add(present_poolPath);
+ if (present_poolPath)
+ list.add(poolPath);
+
+ boolean present_drop = true && (isSetDrop());
+ list.add(present_drop);
+ if (present_drop)
+ list.add(drop);
+
+ return list.hashCode();
+ }
+
+ // Orders by field id: for each field, unset sorts before set, then by value.
+ @Override
+ public int compareTo(WMCreateOrDropTriggerToPoolMappingRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetResourcePlanName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetTriggerName()).compareTo(other.isSetTriggerName());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTriggerName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.triggerName, other.triggerName);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPoolPath()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetDrop()).compareTo(other.isSetDrop());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetDrop()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.drop, other.drop);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMCreateOrDropTriggerToPoolMappingRequest(");
+ boolean first = true;
+
+ if (isSetResourcePlanName()) {
+ sb.append("resourcePlanName:");
+ if (this.resourcePlanName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.resourcePlanName);
+ }
+ first = false;
+ }
+ if (isSetTriggerName()) {
+ if (!first) sb.append(", ");
+ sb.append("triggerName:");
+ if (this.triggerName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.triggerName);
+ }
+ first = false;
+ }
+ if (isSetPoolPath()) {
+ if (!first) sb.append(", ");
+ sb.append("poolPath:");
+ if (this.poolPath == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.poolPath);
+ }
+ first = false;
+ }
+ if (isSetDrop()) {
+ if (!first) sb.append(", ");
+ sb.append("drop:");
+ sb.append(this.drop);
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ // Java serialization is routed through the Thrift compact protocol so that
+ // serialized form stays consistent with the wire format.
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ // Standard (field-tagged) binary encoding used by the default Thrift protocols.
+ private static class WMCreateOrDropTriggerToPoolMappingRequestStandardSchemeFactory implements SchemeFactory {
+ public WMCreateOrDropTriggerToPoolMappingRequestStandardScheme getScheme() {
+ return new WMCreateOrDropTriggerToPoolMappingRequestStandardScheme();
+ }
+ }
+
+ private static class WMCreateOrDropTriggerToPoolMappingRequestStandardScheme extends StandardScheme<WMCreateOrDropTriggerToPoolMappingRequest> {
+
+ // Reads fields in any order until STOP; unknown or mistyped fields are skipped
+ // for forward compatibility.
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // RESOURCE_PLAN_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.resourcePlanName = iprot.readString();
+ struct.setResourcePlanNameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // TRIGGER_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.triggerName = iprot.readString();
+ struct.setTriggerNameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 3: // POOL_PATH
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // DROP
+ if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+ struct.drop = iprot.readBool();
+ struct.setDropIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ // Writes only the fields that are set; optional fields are omitted entirely.
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.resourcePlanName != null) {
+ if (struct.isSetResourcePlanName()) {
+ oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC);
+ oprot.writeString(struct.resourcePlanName);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.triggerName != null) {
+ if (struct.isSetTriggerName()) {
+ oprot.writeFieldBegin(TRIGGER_NAME_FIELD_DESC);
+ oprot.writeString(struct.triggerName);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.poolPath != null) {
+ if (struct.isSetPoolPath()) {
+ oprot.writeFieldBegin(POOL_PATH_FIELD_DESC);
+ oprot.writeString(struct.poolPath);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isSetDrop()) {
+ oprot.writeFieldBegin(DROP_FIELD_DESC);
+ oprot.writeBool(struct.drop);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ // Compact tuple encoding: a leading BitSet records which optional fields
+ // follow, in field-id order.
+ private static class WMCreateOrDropTriggerToPoolMappingRequestTupleSchemeFactory implements SchemeFactory {
+ public WMCreateOrDropTriggerToPoolMappingRequestTupleScheme getScheme() {
+ return new WMCreateOrDropTriggerToPoolMappingRequestTupleScheme();
+ }
+ }
+
+ private static class WMCreateOrDropTriggerToPoolMappingRequestTupleScheme extends TupleScheme<WMCreateOrDropTriggerToPoolMappingRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetResourcePlanName()) {
+ optionals.set(0);
+ }
+ if (struct.isSetTriggerName()) {
+ optionals.set(1);
+ }
+ if (struct.isSetPoolPath()) {
+ optionals.set(2);
+ }
+ if (struct.isSetDrop()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
+ if (struct.isSetResourcePlanName()) {
+ oprot.writeString(struct.resourcePlanName);
+ }
+ if (struct.isSetTriggerName()) {
+ oprot.writeString(struct.triggerName);
+ }
+ if (struct.isSetPoolPath()) {
+ oprot.writeString(struct.poolPath);
+ }
+ if (struct.isSetDrop()) {
+ oprot.writeBool(struct.drop);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(4);
+ if (incoming.get(0)) {
+ struct.resourcePlanName = iprot.readString();
+ struct.setResourcePlanNameIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.triggerName = iprot.readString();
+ struct.setTriggerNameIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.drop = iprot.readBool();
+ struct.setDropIsSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java
new file mode 100644
index 0000000..33b28cd
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+// NOTE(review): Thrift-generated EMPTY struct — a void-style acknowledgement for the
+// create/drop trigger-to-pool mapping call. No fields, so equals/hashCode/compareTo
+// and both wire schemes are trivial. Do not hand-edit; regenerate from the IDL.
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrDropTriggerToPoolMappingResponse implements org.apache.thrift.TBase<WMCreateOrDropTriggerToPoolMappingResponse, WMCreateOrDropTriggerToPoolMappingResponse._Fields>, java.io.Serializable, Cloneable, Comparable<WMCreateOrDropTriggerToPoolMappingResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrDropTriggerToPoolMappingResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMCreateOrDropTriggerToPoolMappingResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMCreateOrDropTriggerToPoolMappingResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+; 
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrDropTriggerToPoolMappingResponse.class, metaDataMap);
+ }
+
+ public WMCreateOrDropTriggerToPoolMappingResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMCreateOrDropTriggerToPoolMappingResponse(WMCreateOrDropTriggerToPoolMappingResponse other) {
+ }
+
+ public WMCreateOrDropTriggerToPoolMappingResponse deepCopy() {
+ return new WMCreateOrDropTriggerToPoolMappingResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMCreateOrDropTriggerToPoolMappingResponse)
+ return this.equals((WMCreateOrDropTriggerToPoolMappingResponse)that);
+ return false;
+ }
+
+ // With no fields, any two non-null instances are equal.
+ public boolean equals(WMCreateOrDropTriggerToPoolMappingResponse that) {
+ if (that == null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMCreateOrDropTriggerToPoolMappingResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMCreateOrDropTriggerToPoolMappingResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ // Java serialization delegates to the Thrift compact protocol.
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMCreateOrDropTriggerToPoolMappingResponseStandardSchemeFactory implements SchemeFactory {
+ public WMCreateOrDropTriggerToPoolMappingResponseStandardScheme getScheme() {
+ return new WMCreateOrDropTriggerToPoolMappingResponseStandardScheme();
+ }
+ }
+
+ private static class WMCreateOrDropTriggerToPoolMappingResponseStandardScheme extends StandardScheme<WMCreateOrDropTriggerToPoolMappingResponse> {
+
+ // No declared fields: read skips anything present until STOP (forward compat).
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMCreateOrDropTriggerToPoolMappingResponseTupleSchemeFactory implements SchemeFactory {
+ public WMCreateOrDropTriggerToPoolMappingResponseTupleScheme getScheme() {
+ return new WMCreateOrDropTriggerToPoolMappingResponseTupleScheme();
+ }
+ }
+
+ // Tuple scheme writes/reads nothing for an empty struct.
+ private static class WMCreateOrDropTriggerToPoolMappingResponseTupleScheme extends TupleScheme<WMCreateOrDropTriggerToPoolMappingResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java
new file mode 100644
index 0000000..2fbe10e
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java
@@ -0,0 +1,501 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrUpdateMappingRequest implements org.apache.thrift.TBase<WMCreateOrUpdateMappingRequest, WMCreateOrUpdateMappingRequest._Fields>, java.io.Serializable, Cloneable, Comparable<WMCreateOrUpdateMappingRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrUpdateMappingRequest");
+
+ private static final org.apache.thrift.protocol.TField MAPPING_FIELD_DESC = new org.apache.thrift.protocol.TField("mapping", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("update", org.apache.thrift.protocol.TType.BOOL, (short)2);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMCreateOrUpdateMappingRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMCreateOrUpdateMappingRequestTupleSchemeFactory());
+ }
+
+ private WMMapping mapping; // optional
+ private boolean update; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ MAPPING((short)1, "mapping"),
+ UPDATE((short)2, "update");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // MAPPING
+ return MAPPING;
+ case 2: // UPDATE
+ return UPDATE;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final int __UPDATE_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.MAPPING,_Fields.UPDATE};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.MAPPING, new org.apache.thrift.meta_data.FieldMetaData("mapping", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMMapping.class)));
+ tmpMap.put(_Fields.UPDATE, new org.apache.thrift.meta_data.FieldMetaData("update", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrUpdateMappingRequest.class, metaDataMap);
+ }
+
+ public WMCreateOrUpdateMappingRequest() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMCreateOrUpdateMappingRequest(WMCreateOrUpdateMappingRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
+ if (other.isSetMapping()) {
+ this.mapping = new WMMapping(other.mapping);
+ }
+ this.update = other.update;
+ }
+
+ public WMCreateOrUpdateMappingRequest deepCopy() {
+ return new WMCreateOrUpdateMappingRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.mapping = null;
+ setUpdateIsSet(false);
+ this.update = false;
+ }
+
+ public WMMapping getMapping() {
+ return this.mapping;
+ }
+
+ public void setMapping(WMMapping mapping) {
+ this.mapping = mapping;
+ }
+
+ public void unsetMapping() {
+ this.mapping = null;
+ }
+
+ /** Returns true if field mapping is set (has been assigned a value) and false otherwise */
+ public boolean isSetMapping() {
+ return this.mapping != null;
+ }
+
+ public void setMappingIsSet(boolean value) {
+ if (!value) {
+ this.mapping = null;
+ }
+ }
+
+ public boolean isUpdate() {
+ return this.update;
+ }
+
+ public void setUpdate(boolean update) {
+ this.update = update;
+ setUpdateIsSet(true);
+ }
+
+ public void unsetUpdate() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATE_ISSET_ID);
+ }
+
+ /** Returns true if field update is set (has been assigned a value) and false otherwise */
+ public boolean isSetUpdate() {
+ return EncodingUtils.testBit(__isset_bitfield, __UPDATE_ISSET_ID);
+ }
+
+ public void setUpdateIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATE_ISSET_ID, value);
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case MAPPING:
+ if (value == null) {
+ unsetMapping();
+ } else {
+ setMapping((WMMapping)value);
+ }
+ break;
+
+ case UPDATE:
+ if (value == null) {
+ unsetUpdate();
+ } else {
+ setUpdate((Boolean)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case MAPPING:
+ return getMapping();
+
+ case UPDATE:
+ return isUpdate();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case MAPPING:
+ return isSetMapping();
+ case UPDATE:
+ return isSetUpdate();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMCreateOrUpdateMappingRequest)
+ return this.equals((WMCreateOrUpdateMappingRequest)that);
+ return false;
+ }
+
+ public boolean equals(WMCreateOrUpdateMappingRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_mapping = true && this.isSetMapping();
+ boolean that_present_mapping = true && that.isSetMapping();
+ if (this_present_mapping || that_present_mapping) {
+ if (!(this_present_mapping && that_present_mapping))
+ return false;
+ if (!this.mapping.equals(that.mapping))
+ return false;
+ }
+
+ boolean this_present_update = true && this.isSetUpdate();
+ boolean that_present_update = true && that.isSetUpdate();
+ if (this_present_update || that_present_update) {
+ if (!(this_present_update && that_present_update))
+ return false;
+ if (this.update != that.update)
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_mapping = true && (isSetMapping());
+ list.add(present_mapping);
+ if (present_mapping)
+ list.add(mapping);
+
+ boolean present_update = true && (isSetUpdate());
+ list.add(present_update);
+ if (present_update)
+ list.add(update);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMCreateOrUpdateMappingRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetMapping()).compareTo(other.isSetMapping());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetMapping()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mapping, other.mapping);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetUpdate()).compareTo(other.isSetUpdate());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetUpdate()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.update, other.update);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMCreateOrUpdateMappingRequest(");
+ boolean first = true;
+
+ if (isSetMapping()) {
+ sb.append("mapping:");
+ if (this.mapping == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.mapping);
+ }
+ first = false;
+ }
+ if (isSetUpdate()) {
+ if (!first) sb.append(", ");
+ sb.append("update:");
+ sb.append(this.update);
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (mapping != null) {
+ mapping.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMCreateOrUpdateMappingRequestStandardSchemeFactory implements SchemeFactory {
+ public WMCreateOrUpdateMappingRequestStandardScheme getScheme() {
+ return new WMCreateOrUpdateMappingRequestStandardScheme();
+ }
+ }
+
+ private static class WMCreateOrUpdateMappingRequestStandardScheme extends StandardScheme<WMCreateOrUpdateMappingRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // MAPPING
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.mapping = new WMMapping();
+ struct.mapping.read(iprot);
+ struct.setMappingIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // UPDATE
+ if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+ struct.update = iprot.readBool();
+ struct.setUpdateIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.mapping != null) {
+ if (struct.isSetMapping()) {
+ oprot.writeFieldBegin(MAPPING_FIELD_DESC);
+ struct.mapping.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isSetUpdate()) {
+ oprot.writeFieldBegin(UPDATE_FIELD_DESC);
+ oprot.writeBool(struct.update);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMCreateOrUpdateMappingRequestTupleSchemeFactory implements SchemeFactory {
+ public WMCreateOrUpdateMappingRequestTupleScheme getScheme() {
+ return new WMCreateOrUpdateMappingRequestTupleScheme();
+ }
+ }
+
+ private static class WMCreateOrUpdateMappingRequestTupleScheme extends TupleScheme<WMCreateOrUpdateMappingRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetMapping()) {
+ optionals.set(0);
+ }
+ if (struct.isSetUpdate()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2);
+ if (struct.isSetMapping()) {
+ struct.mapping.write(oprot);
+ }
+ if (struct.isSetUpdate()) {
+ oprot.writeBool(struct.update);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(2);
+ if (incoming.get(0)) {
+ struct.mapping = new WMMapping();
+ struct.mapping.read(iprot);
+ struct.setMappingIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.update = iprot.readBool();
+ struct.setUpdateIsSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java
new file mode 100644
index 0000000..5b8041f
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrUpdateMappingResponse implements org.apache.thrift.TBase<WMCreateOrUpdateMappingResponse, WMCreateOrUpdateMappingResponse._Fields>, java.io.Serializable, Cloneable, Comparable<WMCreateOrUpdateMappingResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrUpdateMappingResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMCreateOrUpdateMappingResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMCreateOrUpdateMappingResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrUpdateMappingResponse.class, metaDataMap);
+ }
+
+ public WMCreateOrUpdateMappingResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMCreateOrUpdateMappingResponse(WMCreateOrUpdateMappingResponse other) {
+ }
+
+ public WMCreateOrUpdateMappingResponse deepCopy() {
+ return new WMCreateOrUpdateMappingResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMCreateOrUpdateMappingResponse)
+ return this.equals((WMCreateOrUpdateMappingResponse)that);
+ return false;
+ }
+
+ public boolean equals(WMCreateOrUpdateMappingResponse that) {
+ if (that == null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMCreateOrUpdateMappingResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMCreateOrUpdateMappingResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMCreateOrUpdateMappingResponseStandardSchemeFactory implements SchemeFactory {
+ public WMCreateOrUpdateMappingResponseStandardScheme getScheme() {
+ return new WMCreateOrUpdateMappingResponseStandardScheme();
+ }
+ }
+
+ private static class WMCreateOrUpdateMappingResponseStandardScheme extends StandardScheme<WMCreateOrUpdateMappingResponse> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMCreateOrUpdateMappingResponseTupleSchemeFactory implements SchemeFactory {
+ public WMCreateOrUpdateMappingResponseTupleScheme getScheme() {
+ return new WMCreateOrUpdateMappingResponseTupleScheme();
+ }
+ }
+
+ private static class WMCreateOrUpdateMappingResponseTupleScheme extends TupleScheme<WMCreateOrUpdateMappingResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
[04/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index 8dc556b..997e1f7 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -20916,7 +20916,7 @@ class WMMapping {
/**
* @var string
*/
- public $poolName = null;
+ public $poolPath = null;
/**
* @var int
*/
@@ -20938,7 +20938,7 @@ class WMMapping {
'type' => TType::STRING,
),
4 => array(
- 'var' => 'poolName',
+ 'var' => 'poolPath',
'type' => TType::STRING,
),
5 => array(
@@ -20957,8 +20957,8 @@ class WMMapping {
if (isset($vals['entityName'])) {
$this->entityName = $vals['entityName'];
}
- if (isset($vals['poolName'])) {
- $this->poolName = $vals['poolName'];
+ if (isset($vals['poolPath'])) {
+ $this->poolPath = $vals['poolPath'];
}
if (isset($vals['ordering'])) {
$this->ordering = $vals['ordering'];
@@ -21008,7 +21008,7 @@ class WMMapping {
break;
case 4:
if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->poolName);
+ $xfer += $input->readString($this->poolPath);
} else {
$xfer += $input->skip($ftype);
}
@@ -21048,9 +21048,9 @@ class WMMapping {
$xfer += $output->writeString($this->entityName);
$xfer += $output->writeFieldEnd();
}
- if ($this->poolName !== null) {
- $xfer += $output->writeFieldBegin('poolName', TType::STRING, 4);
- $xfer += $output->writeString($this->poolName);
+ if ($this->poolPath !== null) {
+ $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 4);
+ $xfer += $output->writeString($this->poolPath);
$xfer += $output->writeFieldEnd();
}
if ($this->ordering !== null) {
@@ -23082,6 +23082,914 @@ class WMGetTriggersForResourePlanResponse {
}
+class WMCreatePoolRequest {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMPool
+ */
+ public $pool = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'pool',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMPool',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['pool'])) {
+ $this->pool = $vals['pool'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'WMCreatePoolRequest';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->pool = new \metastore\WMPool();
+ $xfer += $this->pool->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMCreatePoolRequest');
+ if ($this->pool !== null) {
+ if (!is_object($this->pool)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('pool', TType::STRUCT, 1);
+ $xfer += $this->pool->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class WMCreatePoolResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'WMCreatePoolResponse';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMCreatePoolResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class WMAlterPoolRequest {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMPool
+ */
+ public $pool = null;
+ /**
+ * @var string
+ */
+ public $poolPath = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'pool',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMPool',
+ ),
+ 2 => array(
+ 'var' => 'poolPath',
+ 'type' => TType::STRING,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['pool'])) {
+ $this->pool = $vals['pool'];
+ }
+ if (isset($vals['poolPath'])) {
+ $this->poolPath = $vals['poolPath'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'WMAlterPoolRequest';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->pool = new \metastore\WMPool();
+ $xfer += $this->pool->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->poolPath);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMAlterPoolRequest');
+ if ($this->pool !== null) {
+ if (!is_object($this->pool)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('pool', TType::STRUCT, 1);
+ $xfer += $this->pool->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->poolPath !== null) {
+ $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 2);
+ $xfer += $output->writeString($this->poolPath);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated struct: empty response for the alter_wm_pool RPC
+// (HIVE-17954 workload-management API). Carries no fields; it exists so
+// the call has a typed result. Generated code -- do not edit by hand.
+class WMAlterPoolResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'WMAlterPoolResponse';
+ }
+
+ // Deserialize from $input (a TProtocol). All fields are skipped since the
+ // struct declares none; returns the number of bytes transferred.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize an empty struct to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMAlterPoolResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated request struct for the drop_wm_pool RPC: identifies the
+// pool to remove by resource-plan name plus pool path.
+// Generated code -- do not edit by hand.
+class WMDropPoolRequest {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $resourcePlanName = null;
+ /**
+ * @var string
+ */
+ public $poolPath = null;
+
+ // $vals: optional associative array seeding 'resourcePlanName'/'poolPath'.
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'resourcePlanName',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'poolPath',
+ 'type' => TType::STRING,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['resourcePlanName'])) {
+ $this->resourcePlanName = $vals['resourcePlanName'];
+ }
+ if (isset($vals['poolPath'])) {
+ $this->poolPath = $vals['poolPath'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'WMDropPoolRequest';
+ }
+
+ // Deserialize from $input (a TProtocol); unknown field ids are skipped for
+ // forward compatibility. Returns the number of bytes transferred.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->resourcePlanName);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->poolPath);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize all non-null fields to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMDropPoolRequest');
+ if ($this->resourcePlanName !== null) {
+ $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1);
+ $xfer += $output->writeString($this->resourcePlanName);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->poolPath !== null) {
+ $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 2);
+ $xfer += $output->writeString($this->poolPath);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated struct: empty response for the drop_wm_pool RPC.
+// Carries no fields. Generated code -- do not edit by hand.
+class WMDropPoolResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'WMDropPoolResponse';
+ }
+
+ // Deserialize from $input (a TProtocol); every field is skipped since the
+ // struct declares none. Returns bytes transferred.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize an empty struct to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMDropPoolResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated request struct for create_or_update_wm_mapping: carries a
+// WMMapping payload plus an 'update' flag (presumably true = alter an
+// existing mapping, false = create -- confirm against the thrift IDL).
+// Generated code -- do not edit by hand.
+class WMCreateOrUpdateMappingRequest {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMMapping
+ */
+ public $mapping = null;
+ /**
+ * @var bool
+ */
+ public $update = null;
+
+ // $vals: optional associative array seeding 'mapping'/'update'.
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'mapping',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMMapping',
+ ),
+ 2 => array(
+ 'var' => 'update',
+ 'type' => TType::BOOL,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['mapping'])) {
+ $this->mapping = $vals['mapping'];
+ }
+ if (isset($vals['update'])) {
+ $this->update = $vals['update'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'WMCreateOrUpdateMappingRequest';
+ }
+
+ // Deserialize from $input (a TProtocol); unknown field ids are skipped for
+ // forward compatibility. Returns bytes transferred.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->mapping = new \metastore\WMMapping();
+ $xfer += $this->mapping->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::BOOL) {
+ $xfer += $input->readBool($this->update);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize all non-null fields to $output; rejects a non-object 'mapping'
+ // with TProtocolException. Returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMCreateOrUpdateMappingRequest');
+ if ($this->mapping !== null) {
+ if (!is_object($this->mapping)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('mapping', TType::STRUCT, 1);
+ $xfer += $this->mapping->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->update !== null) {
+ $xfer += $output->writeFieldBegin('update', TType::BOOL, 2);
+ $xfer += $output->writeBool($this->update);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated struct: empty response for create_or_update_wm_mapping.
+// Carries no fields. Generated code -- do not edit by hand.
+class WMCreateOrUpdateMappingResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'WMCreateOrUpdateMappingResponse';
+ }
+
+ // Deserialize from $input (a TProtocol); all fields skipped (none declared).
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize an empty struct to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMCreateOrUpdateMappingResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated request struct for drop_wm_mapping: carries the WMMapping
+// identifying which user/group-to-pool mapping to remove.
+// Generated code -- do not edit by hand.
+class WMDropMappingRequest {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\WMMapping
+ */
+ public $mapping = null;
+
+ // $vals: optional associative array seeding 'mapping'.
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'mapping',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\WMMapping',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['mapping'])) {
+ $this->mapping = $vals['mapping'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'WMDropMappingRequest';
+ }
+
+ // Deserialize from $input (a TProtocol); unknown field ids are skipped for
+ // forward compatibility. Returns bytes transferred.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->mapping = new \metastore\WMMapping();
+ $xfer += $this->mapping->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize the non-null 'mapping' field to $output; rejects a non-object
+ // value with TProtocolException. Returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMDropMappingRequest');
+ if ($this->mapping !== null) {
+ if (!is_object($this->mapping)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('mapping', TType::STRUCT, 1);
+ $xfer += $this->mapping->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated struct: empty response for the drop_wm_mapping RPC.
+// Carries no fields. Generated code -- do not edit by hand.
+class WMDropMappingResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'WMDropMappingResponse';
+ }
+
+ // Deserialize from $input (a TProtocol); all fields skipped (none declared).
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize an empty struct to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMDropMappingResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated request struct for create_or_drop_wm_trigger_to_pool_mapping:
+// names a (resource plan, trigger, pool) triple plus a 'drop' flag (presumably
+// true = remove the trigger-to-pool link, false = create it -- confirm against
+// the thrift IDL). Generated code -- do not edit by hand.
+class WMCreateOrDropTriggerToPoolMappingRequest {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $resourcePlanName = null;
+ /**
+ * @var string
+ */
+ public $triggerName = null;
+ /**
+ * @var string
+ */
+ public $poolPath = null;
+ /**
+ * @var bool
+ */
+ public $drop = null;
+
+ // $vals: optional associative array seeding any of the four fields.
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'resourcePlanName',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'triggerName',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'poolPath',
+ 'type' => TType::STRING,
+ ),
+ 4 => array(
+ 'var' => 'drop',
+ 'type' => TType::BOOL,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['resourcePlanName'])) {
+ $this->resourcePlanName = $vals['resourcePlanName'];
+ }
+ if (isset($vals['triggerName'])) {
+ $this->triggerName = $vals['triggerName'];
+ }
+ if (isset($vals['poolPath'])) {
+ $this->poolPath = $vals['poolPath'];
+ }
+ if (isset($vals['drop'])) {
+ $this->drop = $vals['drop'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'WMCreateOrDropTriggerToPoolMappingRequest';
+ }
+
+ // Deserialize from $input (a TProtocol); unknown field ids are skipped for
+ // forward compatibility. Returns bytes transferred.
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->resourcePlanName);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->triggerName);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->poolPath);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::BOOL) {
+ $xfer += $input->readBool($this->drop);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize all non-null fields to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMCreateOrDropTriggerToPoolMappingRequest');
+ if ($this->resourcePlanName !== null) {
+ $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1);
+ $xfer += $output->writeString($this->resourcePlanName);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->triggerName !== null) {
+ $xfer += $output->writeFieldBegin('triggerName', TType::STRING, 2);
+ $xfer += $output->writeString($this->triggerName);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->poolPath !== null) {
+ $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 3);
+ $xfer += $output->writeString($this->poolPath);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->drop !== null) {
+ $xfer += $output->writeFieldBegin('drop', TType::BOOL, 4);
+ $xfer += $output->writeBool($this->drop);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+// Thrift-generated struct: empty response for
+// create_or_drop_wm_trigger_to_pool_mapping. Carries no fields.
+// Generated code -- do not edit by hand.
+class WMCreateOrDropTriggerToPoolMappingResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'WMCreateOrDropTriggerToPoolMappingResponse';
+ }
+
+ // Deserialize from $input (a TProtocol); all fields skipped (none declared).
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ // Serialize an empty struct to $output; returns bytes transferred.
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('WMCreateOrDropTriggerToPoolMappingResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class MetaException extends TException {
static $_TSPEC;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 9cc9d1c..5533044 100755
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -197,6 +197,12 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print(' WMAlterTriggerResponse alter_wm_trigger(WMAlterTriggerRequest request)')
print(' WMDropTriggerResponse drop_wm_trigger(WMDropTriggerRequest request)')
print(' WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request)')
+ print(' WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest request)')
+ print(' WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest request)')
+ print(' WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request)')
+ print(' WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request)')
+ print(' WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request)')
+ print(' WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request)')
print(' string getName()')
print(' string getVersion()')
print(' fb_status getStatus()')
@@ -1304,6 +1310,42 @@ elif cmd == 'get_triggers_for_resourceplan':
sys.exit(1)
pp.pprint(client.get_triggers_for_resourceplan(eval(args[0]),))
+# CLI dispatch branches for the six new workload-management RPCs. Each command
+# takes exactly one argument; the argument string is eval()'d into the request
+# object (matches the generated remote tool's existing pattern -- eval of CLI
+# input is only safe for this trusted debugging script).
+elif cmd == 'create_wm_pool':
+ if len(args) != 1:
+ print('create_wm_pool requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.create_wm_pool(eval(args[0]),))
+
+elif cmd == 'alter_wm_pool':
+ if len(args) != 1:
+ print('alter_wm_pool requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.alter_wm_pool(eval(args[0]),))
+
+elif cmd == 'drop_wm_pool':
+ if len(args) != 1:
+ print('drop_wm_pool requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.drop_wm_pool(eval(args[0]),))
+
+elif cmd == 'create_or_update_wm_mapping':
+ if len(args) != 1:
+ print('create_or_update_wm_mapping requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.create_or_update_wm_mapping(eval(args[0]),))
+
+elif cmd == 'drop_wm_mapping':
+ if len(args) != 1:
+ print('drop_wm_mapping requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.drop_wm_mapping(eval(args[0]),))
+
+elif cmd == 'create_or_drop_wm_trigger_to_pool_mapping':
+ if len(args) != 1:
+ print('create_or_drop_wm_trigger_to_pool_mapping requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.create_or_drop_wm_trigger_to_pool_mapping(eval(args[0]),))
+
+
elif cmd == 'getName':
if len(args) != 0:
print('getName requires 0 args')
[08/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index febf304..1e51e37 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -388,6 +388,18 @@ import org.slf4j.LoggerFactory;
public WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ public WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+
+ public WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+
+ public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+
+ public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+
+ public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+
+ public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+
}
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
@@ -738,6 +750,18 @@ import org.slf4j.LoggerFactory;
public void get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void create_wm_pool(WMCreatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void alter_wm_pool(WMAlterPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void drop_wm_pool(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void drop_wm_mapping(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
}
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
@@ -5739,6 +5763,210 @@ import org.slf4j.LoggerFactory;
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result");
}
+ // Synchronous client stub for create_wm_pool: sends the request, blocks
+ // for the reply, and maps declared thrift exceptions (o1..o4) back to
+ // throws. Generated code -- do not edit by hand.
+ public WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ send_create_wm_pool(request);
+ return recv_create_wm_pool();
+ }
+
+ // Serializes the args struct and writes the call onto the transport.
+ public void send_create_wm_pool(WMCreatePoolRequest request) throws org.apache.thrift.TException
+ {
+ create_wm_pool_args args = new create_wm_pool_args();
+ args.setRequest(request);
+ sendBase("create_wm_pool", args);
+ }
+
+ // Reads the result struct; rethrows any set exception field, otherwise
+ // returns success. A result with nothing set is a protocol error.
+ public WMCreatePoolResponse recv_create_wm_pool() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ create_wm_pool_result result = new create_wm_pool_result();
+ receiveBase(result, "create_wm_pool");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ if (result.o4 != null) {
+ throw result.o4;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_wm_pool failed: unknown result");
+ }
+
+ // Synchronous client stub for alter_wm_pool: send, block for reply, map
+ // declared thrift exceptions (o1..o4) back to throws.
+ public WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ send_alter_wm_pool(request);
+ return recv_alter_wm_pool();
+ }
+
+ // Serializes the args struct and writes the call onto the transport.
+ public void send_alter_wm_pool(WMAlterPoolRequest request) throws org.apache.thrift.TException
+ {
+ alter_wm_pool_args args = new alter_wm_pool_args();
+ args.setRequest(request);
+ sendBase("alter_wm_pool", args);
+ }
+
+ // Reads the result struct; rethrows any set exception field, otherwise
+ // returns success. A result with nothing set is a protocol error.
+ public WMAlterPoolResponse recv_alter_wm_pool() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ alter_wm_pool_result result = new alter_wm_pool_result();
+ receiveBase(result, "alter_wm_pool");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ if (result.o4 != null) {
+ throw result.o4;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_wm_pool failed: unknown result");
+ }
+
+ // Synchronous client stub for drop_wm_pool: send, block for reply, map
+ // declared thrift exceptions (o1..o3) back to throws.
+ public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+ {
+ send_drop_wm_pool(request);
+ return recv_drop_wm_pool();
+ }
+
+ // Serializes the args struct and writes the call onto the transport.
+ public void send_drop_wm_pool(WMDropPoolRequest request) throws org.apache.thrift.TException
+ {
+ drop_wm_pool_args args = new drop_wm_pool_args();
+ args.setRequest(request);
+ sendBase("drop_wm_pool", args);
+ }
+
+ // Reads the result struct; rethrows any set exception field, otherwise
+ // returns success. A result with nothing set is a protocol error.
+ public WMDropPoolResponse recv_drop_wm_pool() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+ {
+ drop_wm_pool_result result = new drop_wm_pool_result();
+ receiveBase(result, "drop_wm_pool");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_wm_pool failed: unknown result");
+ }
+
+ // Synchronous client stub for create_or_update_wm_mapping: send, block for
+ // reply, map declared thrift exceptions (o1..o4) back to throws.
+ public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ send_create_or_update_wm_mapping(request);
+ return recv_create_or_update_wm_mapping();
+ }
+
+ // Serializes the args struct and writes the call onto the transport.
+ public void send_create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws org.apache.thrift.TException
+ {
+ create_or_update_wm_mapping_args args = new create_or_update_wm_mapping_args();
+ args.setRequest(request);
+ sendBase("create_or_update_wm_mapping", args);
+ }
+
+ // Reads the result struct; rethrows any set exception field, otherwise
+ // returns success. A result with nothing set is a protocol error.
+ public WMCreateOrUpdateMappingResponse recv_create_or_update_wm_mapping() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result();
+ receiveBase(result, "create_or_update_wm_mapping");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ if (result.o4 != null) {
+ throw result.o4;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result");
+ }
+
+ // Synchronous client stub for drop_wm_mapping: send, block for reply, map
+ // declared thrift exceptions (o1..o3) back to throws.
+ public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+ {
+ send_drop_wm_mapping(request);
+ return recv_drop_wm_mapping();
+ }
+
+ // Serializes the args struct and writes the call onto the transport.
+ public void send_drop_wm_mapping(WMDropMappingRequest request) throws org.apache.thrift.TException
+ {
+ drop_wm_mapping_args args = new drop_wm_mapping_args();
+ args.setRequest(request);
+ sendBase("drop_wm_mapping", args);
+ }
+
+ // Reads the result struct; rethrows any set exception field, otherwise
+ // returns success. A result with nothing set is a protocol error.
+ public WMDropMappingResponse recv_drop_wm_mapping() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+ {
+ drop_wm_mapping_result result = new drop_wm_mapping_result();
+ receiveBase(result, "drop_wm_mapping");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_wm_mapping failed: unknown result");
+ }
+
+ // Synchronous client stub for create_or_drop_wm_trigger_to_pool_mapping:
+ // send, block for reply, map declared thrift exceptions (o1..o4) to throws.
+ public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ send_create_or_drop_wm_trigger_to_pool_mapping(request);
+ return recv_create_or_drop_wm_trigger_to_pool_mapping();
+ }
+
+ // Serializes the args struct and writes the call onto the transport.
+ public void send_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws org.apache.thrift.TException
+ {
+ create_or_drop_wm_trigger_to_pool_mapping_args args = new create_or_drop_wm_trigger_to_pool_mapping_args();
+ args.setRequest(request);
+ sendBase("create_or_drop_wm_trigger_to_pool_mapping", args);
+ }
+
+ // Reads the result struct; rethrows any set exception field, otherwise
+ // returns success. A result with nothing set is a protocol error.
+ public WMCreateOrDropTriggerToPoolMappingResponse recv_create_or_drop_wm_trigger_to_pool_mapping() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException
+ {
+ create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result();
+ receiveBase(result, "create_or_drop_wm_trigger_to_pool_mapping");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ if (result.o4 != null) {
+ throw result.o4;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result");
+ }
+
}
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface {
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -11791,6 +12019,198 @@ import org.slf4j.LoggerFactory;
}
}
+ public void create_wm_pool(WMCreatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ create_wm_pool_call method_call = new create_wm_pool_call(request, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_pool_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private WMCreatePoolRequest request;
+ public create_wm_pool_call(WMCreatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.request = request;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_wm_pool", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ create_wm_pool_args args = new create_wm_pool_args();
+ args.setRequest(request);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public WMCreatePoolResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_create_wm_pool();
+ }
+ }
+
+ public void alter_wm_pool(WMAlterPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ alter_wm_pool_call method_call = new alter_wm_pool_call(request, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_pool_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private WMAlterPoolRequest request;
+ public alter_wm_pool_call(WMAlterPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.request = request;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_wm_pool", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ alter_wm_pool_args args = new alter_wm_pool_args();
+ args.setRequest(request);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public WMAlterPoolResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_alter_wm_pool();
+ }
+ }
+
+ public void drop_wm_pool(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ drop_wm_pool_call method_call = new drop_wm_pool_call(request, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private WMDropPoolRequest request;
+ public drop_wm_pool_call(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.request = request;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_wm_pool", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ drop_wm_pool_args args = new drop_wm_pool_args();
+ args.setRequest(request);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public WMDropPoolResponse getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_drop_wm_pool();
+ }
+ }
+
+ public void create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ create_or_update_wm_mapping_call method_call = new create_or_update_wm_mapping_call(request, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private WMCreateOrUpdateMappingRequest request;
+ public create_or_update_wm_mapping_call(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.request = request;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_or_update_wm_mapping", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ create_or_update_wm_mapping_args args = new create_or_update_wm_mapping_args();
+ args.setRequest(request);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public WMCreateOrUpdateMappingResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_create_or_update_wm_mapping();
+ }
+ }
+
+ public void drop_wm_mapping(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ drop_wm_mapping_call method_call = new drop_wm_mapping_call(request, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private WMDropMappingRequest request;
+ public drop_wm_mapping_call(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.request = request;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_wm_mapping", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ drop_wm_mapping_args args = new drop_wm_mapping_args();
+ args.setRequest(request);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public WMDropMappingResponse getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_drop_wm_mapping();
+ }
+ }
+
+ public void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ create_or_drop_wm_trigger_to_pool_mapping_call method_call = new create_or_drop_wm_trigger_to_pool_mapping_call(request, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private WMCreateOrDropTriggerToPoolMappingRequest request;
+ public create_or_drop_wm_trigger_to_pool_mapping_call(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.request = request;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_or_drop_wm_trigger_to_pool_mapping", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ create_or_drop_wm_trigger_to_pool_mapping_args args = new create_or_drop_wm_trigger_to_pool_mapping_args();
+ args.setRequest(request);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public WMCreateOrDropTriggerToPoolMappingResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_create_or_drop_wm_trigger_to_pool_mapping();
+ }
+ }
+
}
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@ -11977,6 +12397,12 @@ import org.slf4j.LoggerFactory;
processMap.put("alter_wm_trigger", new alter_wm_trigger());
processMap.put("drop_wm_trigger", new drop_wm_trigger());
processMap.put("get_triggers_for_resourceplan", new get_triggers_for_resourceplan());
+ processMap.put("create_wm_pool", new create_wm_pool());
+ processMap.put("alter_wm_pool", new alter_wm_pool());
+ processMap.put("drop_wm_pool", new drop_wm_pool());
+ processMap.put("create_or_update_wm_mapping", new create_or_update_wm_mapping());
+ processMap.put("drop_wm_mapping", new drop_wm_mapping());
+ processMap.put("create_or_drop_wm_trigger_to_pool_mapping", new create_or_drop_wm_trigger_to_pool_mapping());
return processMap;
}
@@ -16396,6 +16822,182 @@ import org.slf4j.LoggerFactory;
}
}
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_pool<I extends Iface> extends org.apache.thrift.ProcessFunction<I, create_wm_pool_args> {
+ public create_wm_pool() {
+ super("create_wm_pool");
+ }
+
+ public create_wm_pool_args getEmptyArgsInstance() {
+ return new create_wm_pool_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public create_wm_pool_result getResult(I iface, create_wm_pool_args args) throws org.apache.thrift.TException {
+ create_wm_pool_result result = new create_wm_pool_result();
+ try {
+ result.success = iface.create_wm_pool(args.request);
+ } catch (AlreadyExistsException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (InvalidObjectException o3) {
+ result.o3 = o3;
+ } catch (MetaException o4) {
+ result.o4 = o4;
+ }
+ return result;
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_pool<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_wm_pool_args> {
+ public alter_wm_pool() {
+ super("alter_wm_pool");
+ }
+
+ public alter_wm_pool_args getEmptyArgsInstance() {
+ return new alter_wm_pool_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public alter_wm_pool_result getResult(I iface, alter_wm_pool_args args) throws org.apache.thrift.TException {
+ alter_wm_pool_result result = new alter_wm_pool_result();
+ try {
+ result.success = iface.alter_wm_pool(args.request);
+ } catch (AlreadyExistsException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (InvalidObjectException o3) {
+ result.o3 = o3;
+ } catch (MetaException o4) {
+ result.o4 = o4;
+ }
+ return result;
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_wm_pool_args> {
+ public drop_wm_pool() {
+ super("drop_wm_pool");
+ }
+
+ public drop_wm_pool_args getEmptyArgsInstance() {
+ return new drop_wm_pool_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public drop_wm_pool_result getResult(I iface, drop_wm_pool_args args) throws org.apache.thrift.TException {
+ drop_wm_pool_result result = new drop_wm_pool_result();
+ try {
+ result.success = iface.drop_wm_pool(args.request);
+ } catch (NoSuchObjectException o1) {
+ result.o1 = o1;
+ } catch (InvalidOperationException o2) {
+ result.o2 = o2;
+ } catch (MetaException o3) {
+ result.o3 = o3;
+ }
+ return result;
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping<I extends Iface> extends org.apache.thrift.ProcessFunction<I, create_or_update_wm_mapping_args> {
+ public create_or_update_wm_mapping() {
+ super("create_or_update_wm_mapping");
+ }
+
+ public create_or_update_wm_mapping_args getEmptyArgsInstance() {
+ return new create_or_update_wm_mapping_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public create_or_update_wm_mapping_result getResult(I iface, create_or_update_wm_mapping_args args) throws org.apache.thrift.TException {
+ create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result();
+ try {
+ result.success = iface.create_or_update_wm_mapping(args.request);
+ } catch (AlreadyExistsException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (InvalidObjectException o3) {
+ result.o3 = o3;
+ } catch (MetaException o4) {
+ result.o4 = o4;
+ }
+ return result;
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_wm_mapping_args> {
+ public drop_wm_mapping() {
+ super("drop_wm_mapping");
+ }
+
+ public drop_wm_mapping_args getEmptyArgsInstance() {
+ return new drop_wm_mapping_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public drop_wm_mapping_result getResult(I iface, drop_wm_mapping_args args) throws org.apache.thrift.TException {
+ drop_wm_mapping_result result = new drop_wm_mapping_result();
+ try {
+ result.success = iface.drop_wm_mapping(args.request);
+ } catch (NoSuchObjectException o1) {
+ result.o1 = o1;
+ } catch (InvalidOperationException o2) {
+ result.o2 = o2;
+ } catch (MetaException o3) {
+ result.o3 = o3;
+ }
+ return result;
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping<I extends Iface> extends org.apache.thrift.ProcessFunction<I, create_or_drop_wm_trigger_to_pool_mapping_args> {
+ public create_or_drop_wm_trigger_to_pool_mapping() {
+ super("create_or_drop_wm_trigger_to_pool_mapping");
+ }
+
+ public create_or_drop_wm_trigger_to_pool_mapping_args getEmptyArgsInstance() {
+ return new create_or_drop_wm_trigger_to_pool_mapping_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public create_or_drop_wm_trigger_to_pool_mapping_result getResult(I iface, create_or_drop_wm_trigger_to_pool_mapping_args args) throws org.apache.thrift.TException {
+ create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result();
+ try {
+ result.success = iface.create_or_drop_wm_trigger_to_pool_mapping(args.request);
+ } catch (AlreadyExistsException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (InvalidObjectException o3) {
+ result.o3 = o3;
+ } catch (MetaException o4) {
+ result.o4 = o4;
+ }
+ return result;
+ }
+ }
+
}
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncProcessor<I extends AsyncIface> extends com.facebook.fb303.FacebookService.AsyncProcessor<I> {
@@ -16582,6 +17184,12 @@ import org.slf4j.LoggerFactory;
processMap.put("alter_wm_trigger", new alter_wm_trigger());
processMap.put("drop_wm_trigger", new drop_wm_trigger());
processMap.put("get_triggers_for_resourceplan", new get_triggers_for_resourceplan());
+ processMap.put("create_wm_pool", new create_wm_pool());
+ processMap.put("alter_wm_pool", new alter_wm_pool());
+ processMap.put("drop_wm_pool", new drop_wm_pool());
+ processMap.put("create_or_update_wm_mapping", new create_or_update_wm_mapping());
+ processMap.put("drop_wm_mapping", new drop_wm_mapping());
+ processMap.put("create_or_drop_wm_trigger_to_pool_mapping", new create_or_drop_wm_trigger_to_pool_mapping());
return processMap;
}
@@ -27126,385 +27734,446 @@ import org.slf4j.LoggerFactory;
}
}
- }
-
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_args> {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args");
-
- private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1);
-
- private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
- static {
- schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory());
- schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory());
- }
-
- private String key; // required
-
- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
- public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- KEY((short)1, "key");
-
- private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
- static {
- for (_Fields field : EnumSet.allOf(_Fields.class)) {
- byName.put(field.getFieldName(), field);
- }
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_pool<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, create_wm_pool_args, WMCreatePoolResponse> {
+ public create_wm_pool() {
+ super("create_wm_pool");
}
- /**
- * Find the _Fields constant that matches fieldId, or null if its not found.
- */
- public static _Fields findByThriftId(int fieldId) {
- switch(fieldId) {
- case 1: // KEY
- return KEY;
- default:
- return null;
- }
+ public create_wm_pool_args getEmptyArgsInstance() {
+ return new create_wm_pool_args();
}
- /**
- * Find the _Fields constant that matches fieldId, throwing an exception
- * if it is not found.
- */
- public static _Fields findByThriftIdOrThrow(int fieldId) {
- _Fields fields = findByThriftId(fieldId);
- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
- return fields;
+ public AsyncMethodCallback<WMCreatePoolResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<WMCreatePoolResponse>() {
+ public void onComplete(WMCreatePoolResponse o) {
+ create_wm_pool_result result = new create_wm_pool_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ create_wm_pool_result result = new create_wm_pool_result();
+ if (e instanceof AlreadyExistsException) {
+ result.o1 = (AlreadyExistsException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof NoSuchObjectException) {
+ result.o2 = (NoSuchObjectException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof InvalidObjectException) {
+ result.o3 = (InvalidObjectException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o4 = (MetaException) e;
+ result.setO4IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
}
- /**
- * Find the _Fields constant that matches name, or null if its not found.
- */
- public static _Fields findByName(String name) {
- return byName.get(name);
+ protected boolean isOneway() {
+ return false;
}
- private final short _thriftId;
- private final String _fieldName;
-
- _Fields(short thriftId, String fieldName) {
- _thriftId = thriftId;
- _fieldName = fieldName;
+ public void start(I iface, create_wm_pool_args args, org.apache.thrift.async.AsyncMethodCallback<WMCreatePoolResponse> resultHandler) throws TException {
+ iface.create_wm_pool(args.request,resultHandler);
}
+ }
- public short getThriftFieldId() {
- return _thriftId;
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_pool<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_wm_pool_args, WMAlterPoolResponse> {
+ public alter_wm_pool() {
+ super("alter_wm_pool");
}
- public String getFieldName() {
- return _fieldName;
+ public alter_wm_pool_args getEmptyArgsInstance() {
+ return new alter_wm_pool_args();
}
- }
-
- // isset id assignments
- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
- static {
- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap);
- }
-
- public getMetaConf_args() {
- }
-
- public getMetaConf_args(
- String key)
- {
- this();
- this.key = key;
- }
- /**
- * Performs a deep copy on <i>other</i>.
- */
- public getMetaConf_args(getMetaConf_args other) {
- if (other.isSetKey()) {
- this.key = other.key;
+ public AsyncMethodCallback<WMAlterPoolResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<WMAlterPoolResponse>() {
+ public void onComplete(WMAlterPoolResponse o) {
+ alter_wm_pool_result result = new alter_wm_pool_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ alter_wm_pool_result result = new alter_wm_pool_result();
+ if (e instanceof AlreadyExistsException) {
+ result.o1 = (AlreadyExistsException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof NoSuchObjectException) {
+ result.o2 = (NoSuchObjectException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof InvalidObjectException) {
+ result.o3 = (InvalidObjectException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o4 = (MetaException) e;
+ result.setO4IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
}
- }
-
- public getMetaConf_args deepCopy() {
- return new getMetaConf_args(this);
- }
-
- @Override
- public void clear() {
- this.key = null;
- }
-
- public String getKey() {
- return this.key;
- }
-
- public void setKey(String key) {
- this.key = key;
- }
- public void unsetKey() {
- this.key = null;
- }
-
- /** Returns true if field key is set (has been assigned a value) and false otherwise */
- public boolean isSetKey() {
- return this.key != null;
- }
-
- public void setKeyIsSet(boolean value) {
- if (!value) {
- this.key = null;
+ protected boolean isOneway() {
+ return false;
}
- }
-
- public void setFieldValue(_Fields field, Object value) {
- switch (field) {
- case KEY:
- if (value == null) {
- unsetKey();
- } else {
- setKey((String)value);
- }
- break;
+ public void start(I iface, alter_wm_pool_args args, org.apache.thrift.async.AsyncMethodCallback<WMAlterPoolResponse> resultHandler) throws TException {
+ iface.alter_wm_pool(args.request,resultHandler);
}
}
- public Object getFieldValue(_Fields field) {
- switch (field) {
- case KEY:
- return getKey();
-
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_wm_pool_args, WMDropPoolResponse> {
+ public drop_wm_pool() {
+ super("drop_wm_pool");
}
- throw new IllegalStateException();
- }
- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
- public boolean isSet(_Fields field) {
- if (field == null) {
- throw new IllegalArgumentException();
+ public drop_wm_pool_args getEmptyArgsInstance() {
+ return new drop_wm_pool_args();
}
- switch (field) {
- case KEY:
- return isSetKey();
+ public AsyncMethodCallback<WMDropPoolResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<WMDropPoolResponse>() {
+ public void onComplete(WMDropPoolResponse o) {
+ drop_wm_pool_result result = new drop_wm_pool_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ drop_wm_pool_result result = new drop_wm_pool_result();
+ if (e instanceof NoSuchObjectException) {
+ result.o1 = (NoSuchObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof InvalidOperationException) {
+ result.o2 = (InvalidOperationException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
}
- throw new IllegalStateException();
- }
-
- @Override
- public boolean equals(Object that) {
- if (that == null)
- return false;
- if (that instanceof getMetaConf_args)
- return this.equals((getMetaConf_args)that);
- return false;
- }
- public boolean equals(getMetaConf_args that) {
- if (that == null)
+ protected boolean isOneway() {
return false;
-
- boolean this_present_key = true && this.isSetKey();
- boolean that_present_key = true && that.isSetKey();
- if (this_present_key || that_present_key) {
- if (!(this_present_key && that_present_key))
- return false;
- if (!this.key.equals(that.key))
- return false;
}
- return true;
- }
-
- @Override
- public int hashCode() {
- List<Object> list = new ArrayList<Object>();
-
- boolean present_key = true && (isSetKey());
- list.add(present_key);
- if (present_key)
- list.add(key);
-
- return list.hashCode();
+ public void start(I iface, drop_wm_pool_args args, org.apache.thrift.async.AsyncMethodCallback<WMDropPoolResponse> resultHandler) throws TException {
+ iface.drop_wm_pool(args.request,resultHandler);
+ }
}
- @Override
- public int compareTo(getMetaConf_args other) {
- if (!getClass().equals(other.getClass())) {
- return getClass().getName().compareTo(other.getClass().getName());
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, create_or_update_wm_mapping_args, WMCreateOrUpdateMappingResponse> {
+ public create_or_update_wm_mapping() {
+ super("create_or_update_wm_mapping");
}
- int lastComparison = 0;
-
- lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetKey()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key);
- if (lastComparison != 0) {
- return lastComparison;
- }
+ public create_or_update_wm_mapping_args getEmptyArgsInstance() {
+ return new create_or_update_wm_mapping_args();
}
- return 0;
- }
-
- public _Fields fieldForId(int fieldId) {
- return _Fields.findByThriftId(fieldId);
- }
- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
- schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
- schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
- }
-
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder("getMetaConf_args(");
- boolean first = true;
-
- sb.append("key:");
- if (this.key == null) {
- sb.append("null");
- } else {
- sb.append(this.key);
+ public AsyncMethodCallback<WMCreateOrUpdateMappingResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<WMCreateOrUpdateMappingResponse>() {
+ public void onComplete(WMCreateOrUpdateMappingResponse o) {
+ create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result();
+ if (e instanceof AlreadyExistsException) {
+ result.o1 = (AlreadyExistsException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof NoSuchObjectException) {
+ result.o2 = (NoSuchObjectException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof InvalidObjectException) {
+ result.o3 = (InvalidObjectException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o4 = (MetaException) e;
+ result.setO4IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
}
- first = false;
- sb.append(")");
- return sb.toString();
- }
- public void validate() throws org.apache.thrift.TException {
- // check for required fields
- // check for sub-struct validity
- }
-
- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
- try {
- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
+ protected boolean isOneway() {
+ return false;
}
- }
- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
- try {
- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
+ public void start(I iface, create_or_update_wm_mapping_args args, org.apache.thrift.async.AsyncMethodCallback<WMCreateOrUpdateMappingResponse> resultHandler) throws TException {
+ iface.create_or_update_wm_mapping(args.request,resultHandler);
}
}
- private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory {
- public getMetaConf_argsStandardScheme getScheme() {
- return new getMetaConf_argsStandardScheme();
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_wm_mapping_args, WMDropMappingResponse> {
+ public drop_wm_mapping() {
+ super("drop_wm_mapping");
}
- }
- private static class getMetaConf_argsStandardScheme extends StandardScheme<getMetaConf_args> {
+ public drop_wm_mapping_args getEmptyArgsInstance() {
+ return new drop_wm_mapping_args();
+ }
- public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TField schemeField;
- iprot.readStructBegin();
- while (true)
- {
- schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
- break;
+ public AsyncMethodCallback<WMDropMappingResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<WMDropMappingResponse>() {
+ public void onComplete(WMDropMappingResponse o) {
+ drop_wm_mapping_result result = new drop_wm_mapping_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
}
- switch (schemeField.id) {
- case 1: // KEY
- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.key = iprot.readString();
- struct.setKeyIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- default:
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ drop_wm_mapping_result result = new drop_wm_mapping_result();
+ if (e instanceof NoSuchObjectException) {
+ result.o1 = (NoSuchObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof InvalidOperationException) {
+ result.o2 = (InvalidOperationException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
}
- iprot.readFieldEnd();
- }
- iprot.readStructEnd();
- struct.validate();
+ };
}
- public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException {
- struct.validate();
-
- oprot.writeStructBegin(STRUCT_DESC);
- if (struct.key != null) {
- oprot.writeFieldBegin(KEY_FIELD_DESC);
- oprot.writeString(struct.key);
- oprot.writeFieldEnd();
- }
- oprot.writeFieldStop();
- oprot.writeStructEnd();
+ protected boolean isOneway() {
+ return false;
}
+ public void start(I iface, drop_wm_mapping_args args, org.apache.thrift.async.AsyncMethodCallback<WMDropMappingResponse> resultHandler) throws TException {
+ iface.drop_wm_mapping(args.request,resultHandler);
+ }
}
- private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory {
- public getMetaConf_argsTupleScheme getScheme() {
- return new getMetaConf_argsTupleScheme();
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, create_or_drop_wm_trigger_to_pool_mapping_args, WMCreateOrDropTriggerToPoolMappingResponse> {
+ public create_or_drop_wm_trigger_to_pool_mapping() {
+ super("create_or_drop_wm_trigger_to_pool_mapping");
}
- }
- private static class getMetaConf_argsTupleScheme extends TupleScheme<getMetaConf_args> {
+ public create_or_drop_wm_trigger_to_pool_mapping_args getEmptyArgsInstance() {
+ return new create_or_drop_wm_trigger_to_pool_mapping_args();
+ }
- @Override
- public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException {
- TTupleProtocol oprot = (TTupleProtocol) prot;
- BitSet optionals = new BitSet();
- if (struct.isSetKey()) {
- optionals.set(0);
- }
- oprot.writeBitSet(optionals, 1);
- if (struct.isSetKey()) {
- oprot.writeString(struct.key);
- }
+ public AsyncMethodCallback<WMCreateOrDropTriggerToPoolMappingResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<WMCreateOrDropTriggerToPoolMappingResponse>() {
+ public void onComplete(WMCreateOrDropTriggerToPoolMappingResponse o) {
+ create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result();
+ if (e instanceof AlreadyExistsException) {
+ result.o1 = (AlreadyExistsException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof NoSuchObjectException) {
+ result.o2 = (NoSuchObjectException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof InvalidObjectException) {
+ result.o3 = (InvalidObjectException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o4 = (MetaException) e;
+ result.setO4IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
}
- @Override
- public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException {
- TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(1);
- if (incoming.get(0)) {
- struct.key = iprot.readString();
- struct.setKeyIsSet(true);
- }
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, create_or_drop_wm_trigger_to_pool_mapping_args args, org.apache.thrift.async.AsyncMethodCallback<WMCreateOrDropTriggerToPoolMappingResponse> resultHandler) throws TException {
+ iface.create_or_drop_wm_trigger_to_pool_mapping(args.request,resultHandler);
}
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_result implements org.apache.thrift.TBase<getMetaConf_result, getMetaConf_result._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_result> {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_args> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args");
- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
- private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
- schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory());
- schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory());
+ schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory());
}
- private String success; // required
- private MetaException o1; // required
+ private String key; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- SUCCESS((short)0, "success"),
- O1((short)1, "o1");
+ KEY((short)1, "key");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -27519,10 +28188,371 @@ import org.slf4j.LoggerFactory;
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
- case 0: // SUCCESS
- return SUCCESS;
- case 1: // O1
- return O1;
+ case 1: // KEY
+ return KEY;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap);
+ }
+
+ public getMetaConf_args() {
+ }
+
+ public getMetaConf_args(
+ String key)
+ {
+ this();
+ this.key = key;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public getMetaConf_args(getMetaConf_args other) {
+ if (other.isSetKey()) {
+ this.key = other.key;
+ }
+ }
+
+ public getMetaConf_args deepCopy() {
+ return new getMetaConf_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.key = null;
+ }
+
+ public String getKey() {
+ return this.key;
+ }
+
+ public void setKey(String key) {
+ this.key = key;
+ }
+
+ public void unsetKey() {
+ this.key = null;
+ }
+
+ /** Returns true if field key is set (has been assigned a value) and false otherwise */
+ public boolean isSetKey() {
+ return this.key != null;
+ }
+
+ public void setKeyIsSet(boolean value) {
+ if (!value) {
+ this.key = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case KEY:
+ if (value == null) {
+ unsetKey();
+ } else {
+ setKey((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case KEY:
+ return getKey();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case KEY:
+ return isSetKey();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof getMetaConf_args)
+ return this.equals((getMetaConf_args)that);
+ return false;
+ }
+
+ public boolean equals(getMetaConf_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_key = true && this.isSetKey();
+ boolean that_present_key = true && that.isSetKey();
+ if (this_present_key || that_present_key) {
+ if (!(this_present_key && that_present_key))
+ return false;
+ if (!this.key.equals(that.key))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_key = true && (isSetKey());
+ list.add(present_key);
+ if (present_key)
+ list.add(key);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(getMetaConf_args other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetKey()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("getMetaConf_args(");
+ boolean first = true;
+
+ sb.append("key:");
+ if (this.key == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.key);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory {
+ public getMetaConf_argsStandardScheme getScheme() {
+ return new getMetaConf_argsStandardScheme();
+ }
+ }
+
+ private static class getMetaConf_argsStandardScheme extends StandardScheme<getMetaConf_args> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // KEY
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.key = iprot.readString();
+ struct.setKeyIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.key != null) {
+ oprot.writeFieldBegin(KEY_FIELD_DESC);
+ oprot.writeString(struct.key);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory {
+ public getMetaConf_argsTupleScheme getScheme() {
+ return new getMetaConf_argsTupleScheme();
+ }
+ }
+
+ private static class getMetaConf_argsTupleScheme extends TupleScheme<getMetaConf_args> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetKey()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetKey()) {
+ oprot.writeString(struct.key);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.key = iprot.readString();
+ struct.setKeyIsSet(true);
+ }
+ }
+ }
+
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_result implements org.apache.thrift.TBase<getMetaConf_result, getMetaConf_result._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_result> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result");
+
+ private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
+ private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory());
+ }
+
+ private String success; // required
+ private MetaException o1; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ SUCCESS((short)0, "success"),
+ O1((short)1, "o1");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 0: // SUCCESS
+ return SUCCESS;
+ case 1: // O1
+ return O1;
default:
return null;
}
@@ -191242,16 +192272,5644 @@ import org.slf4j.LoggerFactory;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateResourcePlanRequest.class)));
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateResourcePlanRequest.class)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_args.class, metaDataMap);
+ }
+
+ public create_resource_plan_args() {
+ }
+
+ public create_resource_plan_args(
+ WMCreateResourcePlanRequest request)
+ {
+ this();
+ this.request = request;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public create_resource_plan_args(create_resource_plan_args other) {
+ if (other.isSetRequest()) {
+ this.request = new WMCreateResourcePlanRequest(other.request);
+ }
+ }
+
+ public create_resource_plan_args deepCopy() {
+ return new create_resource_plan_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.request = null;
+ }
+
+ public WMCreateResourcePlanRequest getRequest() {
+ return this.request;
+ }
+
+ public void setRequest(WMCreateResourcePlanRequest request) {
+ this.request = request;
+ }
+
+ public void unsetRequest() {
+ this.request = null;
+ }
+
+ /** Returns true if field request is set (has been assigned a value) and false otherwise */
+ public boolean isSetRequest() {
+ return this.request != null;
+ }
+
+ public void setRequestIsSet(boolean value) {
+ if (!value) {
+ this.request = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case REQUEST:
+ if (value == null) {
+ unsetRequest();
+ } else {
+ setRequest((WMCreateResourcePlanRequest)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case REQUEST:
+ return getRequest();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case REQUEST:
+ return isSetRequest();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof create_resource_plan_args)
+ return this.equals((create_resource_plan_args)that);
+ return false;
+ }
+
+ public boolean equals(create_resource_plan_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_request = true && this.isSetRequest();
+ boolean that_present_request = true && that.isSetRequest();
+ if (this_present_request || that_present_request) {
+ if (!(this_present_request && that_present_request))
+ return false;
+ if (!this.request.equals(that.request))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_request = true && (isSetRequest());
+ list.add(present_request);
+ if (present_request)
+ list.add(request);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(create_resource_plan_args other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetRequest()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("create_resource_plan_args(");
+ boolean first = true;
+
+ sb.append("request:");
+ if (this.request == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.request);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (request != null) {
+ request.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class create_resource_plan_argsStandardSchemeFactory implements SchemeFactory {
+ public create_resource_plan_argsStandardScheme getScheme() {
+ return new create_resource_plan_argsStandardScheme();
+ }
+ }
+
+ private static class create_resource_plan_argsStandardScheme extends StandardScheme<create_resource_plan_args> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_args struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // REQUEST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.request = new WMCreateResourcePlanRequest();
+ struct.request.read(iprot);
+ struct.setRequestIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_args struct) throws org.apache.thrift.TException {
+ struct.valida
<TRUNCATED>
[06/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolRequest.java
new file mode 100644
index 0000000..f08b359
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolRequest.java
@@ -0,0 +1,398 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreatePoolRequest implements org.apache.thrift.TBase<WMCreatePoolRequest, WMCreatePoolRequest._Fields>, java.io.Serializable, Cloneable, Comparable<WMCreatePoolRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreatePoolRequest");
+
+ private static final org.apache.thrift.protocol.TField POOL_FIELD_DESC = new org.apache.thrift.protocol.TField("pool", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMCreatePoolRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMCreatePoolRequestTupleSchemeFactory());
+ }
+
+ private WMPool pool; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ POOL((short)1, "pool");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // POOL
+ return POOL;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final _Fields optionals[] = {_Fields.POOL};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.POOL, new org.apache.thrift.meta_data.FieldMetaData("pool", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMPool.class)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreatePoolRequest.class, metaDataMap);
+ }
+
+ public WMCreatePoolRequest() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMCreatePoolRequest(WMCreatePoolRequest other) {
+ if (other.isSetPool()) {
+ this.pool = new WMPool(other.pool);
+ }
+ }
+
+ public WMCreatePoolRequest deepCopy() {
+ return new WMCreatePoolRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.pool = null;
+ }
+
+ public WMPool getPool() {
+ return this.pool;
+ }
+
+ public void setPool(WMPool pool) {
+ this.pool = pool;
+ }
+
+ public void unsetPool() {
+ this.pool = null;
+ }
+
+ /** Returns true if field pool is set (has been assigned a value) and false otherwise */
+ public boolean isSetPool() {
+ return this.pool != null;
+ }
+
+ public void setPoolIsSet(boolean value) {
+ if (!value) {
+ this.pool = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case POOL:
+ if (value == null) {
+ unsetPool();
+ } else {
+ setPool((WMPool)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case POOL:
+ return getPool();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case POOL:
+ return isSetPool();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMCreatePoolRequest)
+ return this.equals((WMCreatePoolRequest)that);
+ return false;
+ }
+
+ public boolean equals(WMCreatePoolRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_pool = true && this.isSetPool();
+ boolean that_present_pool = true && that.isSetPool();
+ if (this_present_pool || that_present_pool) {
+ if (!(this_present_pool && that_present_pool))
+ return false;
+ if (!this.pool.equals(that.pool))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_pool = true && (isSetPool());
+ list.add(present_pool);
+ if (present_pool)
+ list.add(pool);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMCreatePoolRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetPool()).compareTo(other.isSetPool());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPool()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pool, other.pool);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMCreatePoolRequest(");
+ boolean first = true;
+
+ if (isSetPool()) {
+ sb.append("pool:");
+ if (this.pool == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.pool);
+ }
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (pool != null) {
+ pool.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMCreatePoolRequestStandardSchemeFactory implements SchemeFactory {
+ public WMCreatePoolRequestStandardScheme getScheme() {
+ return new WMCreatePoolRequestStandardScheme();
+ }
+ }
+
+ private static class WMCreatePoolRequestStandardScheme extends StandardScheme<WMCreatePoolRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreatePoolRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // POOL
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.pool = new WMPool();
+ struct.pool.read(iprot);
+ struct.setPoolIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreatePoolRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.pool != null) {
+ if (struct.isSetPool()) {
+ oprot.writeFieldBegin(POOL_FIELD_DESC);
+ struct.pool.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMCreatePoolRequestTupleSchemeFactory implements SchemeFactory {
+ public WMCreatePoolRequestTupleScheme getScheme() {
+ return new WMCreatePoolRequestTupleScheme();
+ }
+ }
+
+ private static class WMCreatePoolRequestTupleScheme extends TupleScheme<WMCreatePoolRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMCreatePoolRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetPool()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetPool()) {
+ struct.pool.write(oprot);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMCreatePoolRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.pool = new WMPool();
+ struct.pool.read(iprot);
+ struct.setPoolIsSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolResponse.java
new file mode 100644
index 0000000..559809a
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreatePoolResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreatePoolResponse implements org.apache.thrift.TBase<WMCreatePoolResponse, WMCreatePoolResponse._Fields>, java.io.Serializable, Cloneable, Comparable<WMCreatePoolResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreatePoolResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMCreatePoolResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMCreatePoolResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreatePoolResponse.class, metaDataMap);
+ }
+
+ public WMCreatePoolResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMCreatePoolResponse(WMCreatePoolResponse other) {
+ }
+
+ public WMCreatePoolResponse deepCopy() {
+ return new WMCreatePoolResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMCreatePoolResponse)
+ return this.equals((WMCreatePoolResponse)that);
+ return false;
+ }
+
+ public boolean equals(WMCreatePoolResponse that) {
+ if (that == null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMCreatePoolResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMCreatePoolResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMCreatePoolResponseStandardSchemeFactory implements SchemeFactory {
+ public WMCreatePoolResponseStandardScheme getScheme() {
+ return new WMCreatePoolResponseStandardScheme();
+ }
+ }
+
+ private static class WMCreatePoolResponseStandardScheme extends StandardScheme<WMCreatePoolResponse> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreatePoolResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreatePoolResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMCreatePoolResponseTupleSchemeFactory implements SchemeFactory {
+ public WMCreatePoolResponseTupleScheme getScheme() {
+ return new WMCreatePoolResponseTupleScheme();
+ }
+ }
+
+ private static class WMCreatePoolResponseTupleScheme extends TupleScheme<WMCreatePoolResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMCreatePoolResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMCreatePoolResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java
new file mode 100644
index 0000000..4fdf402
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java
@@ -0,0 +1,398 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropMappingRequest implements org.apache.thrift.TBase<WMDropMappingRequest, WMDropMappingRequest._Fields>, java.io.Serializable, Cloneable, Comparable<WMDropMappingRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropMappingRequest");
+
+ private static final org.apache.thrift.protocol.TField MAPPING_FIELD_DESC = new org.apache.thrift.protocol.TField("mapping", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMDropMappingRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMDropMappingRequestTupleSchemeFactory());
+ }
+
+ private WMMapping mapping; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ MAPPING((short)1, "mapping");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // MAPPING
+ return MAPPING;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final _Fields optionals[] = {_Fields.MAPPING};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.MAPPING, new org.apache.thrift.meta_data.FieldMetaData("mapping", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMMapping.class)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropMappingRequest.class, metaDataMap);
+ }
+
+ public WMDropMappingRequest() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMDropMappingRequest(WMDropMappingRequest other) {
+ if (other.isSetMapping()) {
+ this.mapping = new WMMapping(other.mapping);
+ }
+ }
+
+ public WMDropMappingRequest deepCopy() {
+ return new WMDropMappingRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.mapping = null;
+ }
+
+ public WMMapping getMapping() {
+ return this.mapping;
+ }
+
+ public void setMapping(WMMapping mapping) {
+ this.mapping = mapping;
+ }
+
+ public void unsetMapping() {
+ this.mapping = null;
+ }
+
+ /** Returns true if field mapping is set (has been assigned a value) and false otherwise */
+ public boolean isSetMapping() {
+ return this.mapping != null;
+ }
+
+ public void setMappingIsSet(boolean value) {
+ if (!value) {
+ this.mapping = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case MAPPING:
+ if (value == null) {
+ unsetMapping();
+ } else {
+ setMapping((WMMapping)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case MAPPING:
+ return getMapping();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case MAPPING:
+ return isSetMapping();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMDropMappingRequest)
+ return this.equals((WMDropMappingRequest)that);
+ return false;
+ }
+
+ public boolean equals(WMDropMappingRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_mapping = true && this.isSetMapping();
+ boolean that_present_mapping = true && that.isSetMapping();
+ if (this_present_mapping || that_present_mapping) {
+ if (!(this_present_mapping && that_present_mapping))
+ return false;
+ if (!this.mapping.equals(that.mapping))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_mapping = true && (isSetMapping());
+ list.add(present_mapping);
+ if (present_mapping)
+ list.add(mapping);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMDropMappingRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetMapping()).compareTo(other.isSetMapping());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetMapping()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mapping, other.mapping);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMDropMappingRequest(");
+ boolean first = true;
+
+ if (isSetMapping()) {
+ sb.append("mapping:");
+ if (this.mapping == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.mapping);
+ }
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (mapping != null) {
+ mapping.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMDropMappingRequestStandardSchemeFactory implements SchemeFactory {
+ public WMDropMappingRequestStandardScheme getScheme() {
+ return new WMDropMappingRequestStandardScheme();
+ }
+ }
+
+ private static class WMDropMappingRequestStandardScheme extends StandardScheme<WMDropMappingRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropMappingRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // MAPPING
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.mapping = new WMMapping();
+ struct.mapping.read(iprot);
+ struct.setMappingIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropMappingRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.mapping != null) {
+ if (struct.isSetMapping()) {
+ oprot.writeFieldBegin(MAPPING_FIELD_DESC);
+ struct.mapping.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMDropMappingRequestTupleSchemeFactory implements SchemeFactory {
+ public WMDropMappingRequestTupleScheme getScheme() {
+ return new WMDropMappingRequestTupleScheme();
+ }
+ }
+
+ private static class WMDropMappingRequestTupleScheme extends TupleScheme<WMDropMappingRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMDropMappingRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetMapping()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetMapping()) {
+ struct.mapping.write(oprot);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMDropMappingRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.mapping = new WMMapping();
+ struct.mapping.read(iprot);
+ struct.setMappingIsSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java
new file mode 100644
index 0000000..f3fd4b3
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropMappingResponse implements org.apache.thrift.TBase<WMDropMappingResponse, WMDropMappingResponse._Fields>, java.io.Serializable, Cloneable, Comparable<WMDropMappingResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropMappingResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMDropMappingResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMDropMappingResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum { // empty struct: no Thrift fields declared
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropMappingResponse.class, metaDataMap);
+ }
+
+ public WMDropMappingResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMDropMappingResponse(WMDropMappingResponse other) {
+ }
+
+ public WMDropMappingResponse deepCopy() {
+ return new WMDropMappingResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMDropMappingResponse)
+ return this.equals((WMDropMappingResponse)that);
+ return false;
+ }
+
+ public boolean equals(WMDropMappingResponse that) {
+ if (that == null)
+ return false;
+
+ return true; // no fields, so any two non-null instances compare equal
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode(); // hash of an empty list: same constant for every instance
+ }
+
+ @Override
+ public int compareTo(WMDropMappingResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMDropMappingResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMDropMappingResponseStandardSchemeFactory implements SchemeFactory {
+ public WMDropMappingResponseStandardScheme getScheme() {
+ return new WMDropMappingResponseStandardScheme();
+ }
+ }
+
+ private static class WMDropMappingResponseStandardScheme extends StandardScheme<WMDropMappingResponse> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropMappingResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropMappingResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop(); // empty struct: only the stop marker is written
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMDropMappingResponseTupleSchemeFactory implements SchemeFactory {
+ public WMDropMappingResponseTupleScheme getScheme() {
+ return new WMDropMappingResponseTupleScheme();
+ }
+ }
+
+ private static class WMDropMappingResponseTupleScheme extends TupleScheme<WMDropMappingResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMDropMappingResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMDropMappingResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java
new file mode 100644
index 0000000..114cdde
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java
@@ -0,0 +1,499 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropPoolRequest implements org.apache.thrift.TBase<WMDropPoolRequest, WMDropPoolRequest._Fields>, java.io.Serializable, Cloneable, Comparable<WMDropPoolRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropPoolRequest");
+
+ private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMDropPoolRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMDropPoolRequestTupleSchemeFactory());
+ }
+
+ private String resourcePlanName; // optional
+ private String poolPath; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ RESOURCE_PLAN_NAME((short)1, "resourcePlanName"),
+ POOL_PATH((short)2, "poolPath");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // RESOURCE_PLAN_NAME
+ return RESOURCE_PLAN_NAME;
+ case 2: // POOL_PATH
+ return POOL_PATH;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final _Fields optionals[] = {_Fields.RESOURCE_PLAN_NAME,_Fields.POOL_PATH}; // both fields are declared optional in the IDL
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropPoolRequest.class, metaDataMap);
+ }
+
+ public WMDropPoolRequest() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMDropPoolRequest(WMDropPoolRequest other) {
+ if (other.isSetResourcePlanName()) {
+ this.resourcePlanName = other.resourcePlanName;
+ }
+ if (other.isSetPoolPath()) {
+ this.poolPath = other.poolPath;
+ }
+ }
+
+ public WMDropPoolRequest deepCopy() {
+ return new WMDropPoolRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.resourcePlanName = null;
+ this.poolPath = null;
+ }
+
+ public String getResourcePlanName() {
+ return this.resourcePlanName;
+ }
+
+ public void setResourcePlanName(String resourcePlanName) {
+ this.resourcePlanName = resourcePlanName;
+ }
+
+ public void unsetResourcePlanName() {
+ this.resourcePlanName = null;
+ }
+
+ /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */
+ public boolean isSetResourcePlanName() {
+ return this.resourcePlanName != null;
+ }
+
+ public void setResourcePlanNameIsSet(boolean value) {
+ if (!value) {
+ this.resourcePlanName = null;
+ }
+ }
+
+ public String getPoolPath() {
+ return this.poolPath;
+ }
+
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
+ }
+
+ public void unsetPoolPath() {
+ this.poolPath = null;
+ }
+
+ /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */
+ public boolean isSetPoolPath() {
+ return this.poolPath != null;
+ }
+
+ public void setPoolPathIsSet(boolean value) {
+ if (!value) {
+ this.poolPath = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case RESOURCE_PLAN_NAME:
+ if (value == null) {
+ unsetResourcePlanName();
+ } else {
+ setResourcePlanName((String)value);
+ }
+ break;
+
+ case POOL_PATH:
+ if (value == null) {
+ unsetPoolPath();
+ } else {
+ setPoolPath((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case RESOURCE_PLAN_NAME:
+ return getResourcePlanName();
+
+ case POOL_PATH:
+ return getPoolPath();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case RESOURCE_PLAN_NAME:
+ return isSetResourcePlanName();
+ case POOL_PATH:
+ return isSetPoolPath();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMDropPoolRequest)
+ return this.equals((WMDropPoolRequest)that);
+ return false;
+ }
+
+ public boolean equals(WMDropPoolRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_resourcePlanName = true && this.isSetResourcePlanName();
+ boolean that_present_resourcePlanName = true && that.isSetResourcePlanName();
+ if (this_present_resourcePlanName || that_present_resourcePlanName) {
+ if (!(this_present_resourcePlanName && that_present_resourcePlanName))
+ return false;
+ if (!this.resourcePlanName.equals(that.resourcePlanName))
+ return false;
+ }
+
+ boolean this_present_poolPath = true && this.isSetPoolPath();
+ boolean that_present_poolPath = true && that.isSetPoolPath();
+ if (this_present_poolPath || that_present_poolPath) {
+ if (!(this_present_poolPath && that_present_poolPath))
+ return false;
+ if (!this.poolPath.equals(that.poolPath))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_resourcePlanName = true && (isSetResourcePlanName());
+ list.add(present_resourcePlanName);
+ if (present_resourcePlanName)
+ list.add(resourcePlanName);
+
+ boolean present_poolPath = true && (isSetPoolPath());
+ list.add(present_poolPath);
+ if (present_poolPath)
+ list.add(poolPath);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WMDropPoolRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetResourcePlanName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPoolPath()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMDropPoolRequest(");
+ boolean first = true;
+
+ if (isSetResourcePlanName()) {
+ sb.append("resourcePlanName:");
+ if (this.resourcePlanName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.resourcePlanName);
+ }
+ first = false;
+ }
+ if (isSetPoolPath()) {
+ if (!first) sb.append(", ");
+ sb.append("poolPath:");
+ if (this.poolPath == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.poolPath);
+ }
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMDropPoolRequestStandardSchemeFactory implements SchemeFactory {
+ public WMDropPoolRequestStandardScheme getScheme() {
+ return new WMDropPoolRequestStandardScheme();
+ }
+ }
+
+ private static class WMDropPoolRequestStandardScheme extends StandardScheme<WMDropPoolRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropPoolRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // RESOURCE_PLAN_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.resourcePlanName = iprot.readString();
+ struct.setResourcePlanNameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // POOL_PATH
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropPoolRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.resourcePlanName != null) {
+ if (struct.isSetResourcePlanName()) {
+ oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC);
+ oprot.writeString(struct.resourcePlanName);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.poolPath != null) {
+ if (struct.isSetPoolPath()) {
+ oprot.writeFieldBegin(POOL_PATH_FIELD_DESC);
+ oprot.writeString(struct.poolPath);
+ oprot.writeFieldEnd();
+ }
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMDropPoolRequestTupleSchemeFactory implements SchemeFactory {
+ public WMDropPoolRequestTupleScheme getScheme() {
+ return new WMDropPoolRequestTupleScheme();
+ }
+ }
+
+ private static class WMDropPoolRequestTupleScheme extends TupleScheme<WMDropPoolRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMDropPoolRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetResourcePlanName()) {
+ optionals.set(0);
+ }
+ if (struct.isSetPoolPath()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2); // 2 bits: resourcePlanName, poolPath presence flags
+ if (struct.isSetResourcePlanName()) {
+ oprot.writeString(struct.resourcePlanName);
+ }
+ if (struct.isSetPoolPath()) {
+ oprot.writeString(struct.poolPath);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMDropPoolRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(2); // mirrors the 2-bit set emitted by write()
+ if (incoming.get(0)) {
+ struct.resourcePlanName = iprot.readString();
+ struct.setResourcePlanNameIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java
new file mode 100644
index 0000000..602754c
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropPoolResponse implements org.apache.thrift.TBase<WMDropPoolResponse, WMDropPoolResponse._Fields>, java.io.Serializable, Cloneable, Comparable<WMDropPoolResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropPoolResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WMDropPoolResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WMDropPoolResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum { // empty struct: no Thrift fields declared
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropPoolResponse.class, metaDataMap);
+ }
+
+ public WMDropPoolResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public WMDropPoolResponse(WMDropPoolResponse other) {
+ }
+
+ public WMDropPoolResponse deepCopy() {
+ return new WMDropPoolResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof WMDropPoolResponse)
+ return this.equals((WMDropPoolResponse)that);
+ return false;
+ }
+
+ public boolean equals(WMDropPoolResponse that) {
+ if (that == null)
+ return false;
+
+ return true; // no fields, so any two non-null instances compare equal
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode(); // hash of an empty list: same constant for every instance
+ }
+
+ @Override
+ public int compareTo(WMDropPoolResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WMDropPoolResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WMDropPoolResponseStandardSchemeFactory implements SchemeFactory {
+ public WMDropPoolResponseStandardScheme getScheme() {
+ return new WMDropPoolResponseStandardScheme();
+ }
+ }
+
+ private static class WMDropPoolResponseStandardScheme extends StandardScheme<WMDropPoolResponse> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropPoolResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropPoolResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop(); // empty struct: only the stop marker is written
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WMDropPoolResponseTupleSchemeFactory implements SchemeFactory {
+ public WMDropPoolResponseTupleScheme getScheme() {
+ return new WMDropPoolResponseTupleScheme();
+ }
+ }
+
+ private static class WMDropPoolResponseTupleScheme extends TupleScheme<WMDropPoolResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WMDropPoolResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WMDropPoolResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java
index af7ee52..8b7d41a 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java
@@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField ENTITY_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("entityType", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField ENTITY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("entityName", org.apache.thrift.protocol.TType.STRING, (short)3);
- private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)4);
private static final org.apache.thrift.protocol.TField ORDERING_FIELD_DESC = new org.apache.thrift.protocol.TField("ordering", org.apache.thrift.protocol.TType.I32, (short)5);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -53,7 +53,7 @@ import org.slf4j.LoggerFactory;
private String resourcePlanName; // required
private String entityType; // required
private String entityName; // required
- private String poolName; // optional
+ private String poolPath; // optional
private int ordering; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -61,7 +61,7 @@ import org.slf4j.LoggerFactory;
RESOURCE_PLAN_NAME((short)1, "resourcePlanName"),
ENTITY_TYPE((short)2, "entityType"),
ENTITY_NAME((short)3, "entityName"),
- POOL_NAME((short)4, "poolName"),
+ POOL_PATH((short)4, "poolPath"),
ORDERING((short)5, "ordering");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -83,8 +83,8 @@ import org.slf4j.LoggerFactory;
return ENTITY_TYPE;
case 3: // ENTITY_NAME
return ENTITY_NAME;
- case 4: // POOL_NAME
- return POOL_NAME;
+ case 4: // POOL_PATH
+ return POOL_PATH;
case 5: // ORDERING
return ORDERING;
default:
@@ -129,7 +129,7 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __ORDERING_ISSET_ID = 0;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.POOL_NAME,_Fields.ORDERING};
+ private static final _Fields optionals[] = {_Fields.POOL_PATH,_Fields.ORDERING};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -139,7 +139,7 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.ENTITY_NAME, new org.apache.thrift.meta_data.FieldMetaData("entityName", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("poolName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.ORDERING, new org.apache.thrift.meta_data.FieldMetaData("ordering", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
@@ -175,8 +175,8 @@ import org.slf4j.LoggerFactory;
if (other.isSetEntityName()) {
this.entityName = other.entityName;
}
- if (other.isSetPoolName()) {
- this.poolName = other.poolName;
+ if (other.isSetPoolPath()) {
+ this.poolPath = other.poolPath;
}
this.ordering = other.ordering;
}
@@ -190,7 +190,7 @@ import org.slf4j.LoggerFactory;
this.resourcePlanName = null;
this.entityType = null;
this.entityName = null;
- this.poolName = null;
+ this.poolPath = null;
setOrderingIsSet(false);
this.ordering = 0;
}
@@ -264,26 +264,26 @@ import org.slf4j.LoggerFactory;
}
}
- public String getPoolName() {
- return this.poolName;
+ public String getPoolPath() {
+ return this.poolPath;
}
- public void setPoolName(String poolName) {
- this.poolName = poolName;
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
}
- public void unsetPoolName() {
- this.poolName = null;
+ public void unsetPoolPath() {
+ this.poolPath = null;
}
- /** Returns true if field poolName is set (has been assigned a value) and false otherwise */
- public boolean isSetPoolName() {
- return this.poolName != null;
+ /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */
+ public boolean isSetPoolPath() {
+ return this.poolPath != null;
}
- public void setPoolNameIsSet(boolean value) {
+ public void setPoolPathIsSet(boolean value) {
if (!value) {
- this.poolName = null;
+ this.poolPath = null;
}
}
@@ -335,11 +335,11 @@ import org.slf4j.LoggerFactory;
}
break;
- case POOL_NAME:
+ case POOL_PATH:
if (value == null) {
- unsetPoolName();
+ unsetPoolPath();
} else {
- setPoolName((String)value);
+ setPoolPath((String)value);
}
break;
@@ -365,8 +365,8 @@ import org.slf4j.LoggerFactory;
case ENTITY_NAME:
return getEntityName();
- case POOL_NAME:
- return getPoolName();
+ case POOL_PATH:
+ return getPoolPath();
case ORDERING:
return getOrdering();
@@ -388,8 +388,8 @@ import org.slf4j.LoggerFactory;
return isSetEntityType();
case ENTITY_NAME:
return isSetEntityName();
- case POOL_NAME:
- return isSetPoolName();
+ case POOL_PATH:
+ return isSetPoolPath();
case ORDERING:
return isSetOrdering();
}
@@ -436,12 +436,12 @@ import org.slf4j.LoggerFactory;
return false;
}
- boolean this_present_poolName = true && this.isSetPoolName();
- boolean that_present_poolName = true && that.isSetPoolName();
- if (this_present_poolName || that_present_poolName) {
- if (!(this_present_poolName && that_present_poolName))
+ boolean this_present_poolPath = true && this.isSetPoolPath();
+ boolean that_present_poolPath = true && that.isSetPoolPath();
+ if (this_present_poolPath || that_present_poolPath) {
+ if (!(this_present_poolPath && that_present_poolPath))
return false;
- if (!this.poolName.equals(that.poolName))
+ if (!this.poolPath.equals(that.poolPath))
return false;
}
@@ -476,10 +476,10 @@ import org.slf4j.LoggerFactory;
if (present_entityName)
list.add(entityName);
- boolean present_poolName = true && (isSetPoolName());
- list.add(present_poolName);
- if (present_poolName)
- list.add(poolName);
+ boolean present_poolPath = true && (isSetPoolPath());
+ list.add(present_poolPath);
+ if (present_poolPath)
+ list.add(poolPath);
boolean present_ordering = true && (isSetOrdering());
list.add(present_ordering);
@@ -527,12 +527,12 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetPoolName()).compareTo(other.isSetPoolName());
+ lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetPoolName()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolName, other.poolName);
+ if (isSetPoolPath()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath);
if (lastComparison != 0) {
return lastComparison;
}
@@ -590,13 +590,13 @@ import org.slf4j.LoggerFactory;
sb.append(this.entityName);
}
first = false;
- if (isSetPoolName()) {
+ if (isSetPoolPath()) {
if (!first) sb.append(", ");
- sb.append("poolName:");
- if (this.poolName == null) {
+ sb.append("poolPath:");
+ if (this.poolPath == null) {
sb.append("null");
} else {
- sb.append(this.poolName);
+ sb.append(this.poolPath);
}
first = false;
}
@@ -687,10 +687,10 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
- case 4: // POOL_NAME
+ case 4: // POOL_PATH
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.poolName = iprot.readString();
- struct.setPoolNameIsSet(true);
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -731,10 +731,10 @@ import org.slf4j.LoggerFactory;
oprot.writeString(struct.entityName);
oprot.writeFieldEnd();
}
- if (struct.poolName != null) {
- if (struct.isSetPoolName()) {
- oprot.writeFieldBegin(POOL_NAME_FIELD_DESC);
- oprot.writeString(struct.poolName);
+ if (struct.poolPath != null) {
+ if (struct.isSetPoolPath()) {
+ oprot.writeFieldBegin(POOL_PATH_FIELD_DESC);
+ oprot.writeString(struct.poolPath);
oprot.writeFieldEnd();
}
}
@@ -764,15 +764,15 @@ import org.slf4j.LoggerFactory;
oprot.writeString(struct.entityType);
oprot.writeString(struct.entityName);
BitSet optionals = new BitSet();
- if (struct.isSetPoolName()) {
+ if (struct.isSetPoolPath()) {
optionals.set(0);
}
if (struct.isSetOrdering()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
- if (struct.isSetPoolName()) {
- oprot.writeString(struct.poolName);
+ if (struct.isSetPoolPath()) {
+ oprot.writeString(struct.poolPath);
}
if (struct.isSetOrdering()) {
oprot.writeI32(struct.ordering);
@@ -790,8 +790,8 @@ import org.slf4j.LoggerFactory;
struct.setEntityNameIsSet(true);
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
- struct.poolName = iprot.readString();
- struct.setPoolNameIsSet(true);
+ struct.poolPath = iprot.readString();
+ struct.setPoolPathIsSet(true);
}
if (incoming.get(1)) {
struct.ordering = iprot.readI32();
[03/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 0351cd8..807e6b7 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -1370,6 +1370,48 @@ class Iface(fb303.FacebookService.Iface):
"""
pass
+ def create_wm_pool(self, request):
+ """
+ Parameters:
+ - request
+ """
+ pass
+
+ def alter_wm_pool(self, request):
+ """
+ Parameters:
+ - request
+ """
+ pass
+
+ def drop_wm_pool(self, request):
+ """
+ Parameters:
+ - request
+ """
+ pass
+
+ def create_or_update_wm_mapping(self, request):
+ """
+ Parameters:
+ - request
+ """
+ pass
+
+ def drop_wm_mapping(self, request):
+ """
+ Parameters:
+ - request
+ """
+ pass
+
+ def create_or_drop_wm_trigger_to_pool_mapping(self, request):
+ """
+ Parameters:
+ - request
+ """
+ pass
+
class Client(fb303.FacebookService.Client, Iface):
"""
@@ -7599,6 +7641,236 @@ class Client(fb303.FacebookService.Client, Iface):
raise result.o2
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result")
+ def create_wm_pool(self, request):
+ """
+ Parameters:
+ - request
+ """
+ self.send_create_wm_pool(request)
+ return self.recv_create_wm_pool()
+
+ def send_create_wm_pool(self, request):
+ self._oprot.writeMessageBegin('create_wm_pool', TMessageType.CALL, self._seqid)
+ args = create_wm_pool_args()
+ args.request = request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_create_wm_pool(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = create_wm_pool_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ if result.o4 is not None:
+ raise result.o4
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "create_wm_pool failed: unknown result")
+
+ def alter_wm_pool(self, request):
+ """
+ Parameters:
+ - request
+ """
+ self.send_alter_wm_pool(request)
+ return self.recv_alter_wm_pool()
+
+ def send_alter_wm_pool(self, request):
+ self._oprot.writeMessageBegin('alter_wm_pool', TMessageType.CALL, self._seqid)
+ args = alter_wm_pool_args()
+ args.request = request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_alter_wm_pool(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = alter_wm_pool_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ if result.o4 is not None:
+ raise result.o4
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_wm_pool failed: unknown result")
+
+ def drop_wm_pool(self, request):
+ """
+ Parameters:
+ - request
+ """
+ self.send_drop_wm_pool(request)
+ return self.recv_drop_wm_pool()
+
+ def send_drop_wm_pool(self, request):
+ self._oprot.writeMessageBegin('drop_wm_pool', TMessageType.CALL, self._seqid)
+ args = drop_wm_pool_args()
+ args.request = request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_drop_wm_pool(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = drop_wm_pool_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_pool failed: unknown result")
+
+ def create_or_update_wm_mapping(self, request):
+ """
+ Parameters:
+ - request
+ """
+ self.send_create_or_update_wm_mapping(request)
+ return self.recv_create_or_update_wm_mapping()
+
+ def send_create_or_update_wm_mapping(self, request):
+ self._oprot.writeMessageBegin('create_or_update_wm_mapping', TMessageType.CALL, self._seqid)
+ args = create_or_update_wm_mapping_args()
+ args.request = request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_create_or_update_wm_mapping(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = create_or_update_wm_mapping_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ if result.o4 is not None:
+ raise result.o4
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result")
+
+ def drop_wm_mapping(self, request):
+ """
+ Parameters:
+ - request
+ """
+ self.send_drop_wm_mapping(request)
+ return self.recv_drop_wm_mapping()
+
+ def send_drop_wm_mapping(self, request):
+ self._oprot.writeMessageBegin('drop_wm_mapping', TMessageType.CALL, self._seqid)
+ args = drop_wm_mapping_args()
+ args.request = request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_drop_wm_mapping(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = drop_wm_mapping_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_mapping failed: unknown result")
+
+ def create_or_drop_wm_trigger_to_pool_mapping(self, request):
+ """
+ Parameters:
+ - request
+ """
+ self.send_create_or_drop_wm_trigger_to_pool_mapping(request)
+ return self.recv_create_or_drop_wm_trigger_to_pool_mapping()
+
+ def send_create_or_drop_wm_trigger_to_pool_mapping(self, request):
+ self._oprot.writeMessageBegin('create_or_drop_wm_trigger_to_pool_mapping', TMessageType.CALL, self._seqid)
+ args = create_or_drop_wm_trigger_to_pool_mapping_args()
+ args.request = request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_create_or_drop_wm_trigger_to_pool_mapping(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = create_or_drop_wm_trigger_to_pool_mapping_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ if result.o4 is not None:
+ raise result.o4
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result")
+
class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
def __init__(self, handler):
@@ -7776,6 +8048,12 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
self._processMap["alter_wm_trigger"] = Processor.process_alter_wm_trigger
self._processMap["drop_wm_trigger"] = Processor.process_drop_wm_trigger
self._processMap["get_triggers_for_resourceplan"] = Processor.process_get_triggers_for_resourceplan
+ self._processMap["create_wm_pool"] = Processor.process_create_wm_pool
+ self._processMap["alter_wm_pool"] = Processor.process_alter_wm_pool
+ self._processMap["drop_wm_pool"] = Processor.process_drop_wm_pool
+ self._processMap["create_or_update_wm_mapping"] = Processor.process_create_or_update_wm_mapping
+ self._processMap["drop_wm_mapping"] = Processor.process_drop_wm_mapping
+ self._processMap["create_or_drop_wm_trigger_to_pool_mapping"] = Processor.process_create_or_drop_wm_trigger_to_pool_mapping
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
@@ -12027,6 +12305,186 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_create_wm_pool(self, seqid, iprot, oprot):
+ args = create_wm_pool_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = create_wm_pool_result()
+ try:
+ result.success = self._handler.create_wm_pool(args.request)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except AlreadyExistsException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except NoSuchObjectException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except InvalidObjectException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except MetaException as o4:
+ msg_type = TMessageType.REPLY
+ result.o4 = o4
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("create_wm_pool", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_alter_wm_pool(self, seqid, iprot, oprot):
+ args = alter_wm_pool_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = alter_wm_pool_result()
+ try:
+ result.success = self._handler.alter_wm_pool(args.request)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except AlreadyExistsException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except NoSuchObjectException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except InvalidObjectException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except MetaException as o4:
+ msg_type = TMessageType.REPLY
+ result.o4 = o4
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("alter_wm_pool", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_drop_wm_pool(self, seqid, iprot, oprot):
+ args = drop_wm_pool_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = drop_wm_pool_result()
+ try:
+ result.success = self._handler.drop_wm_pool(args.request)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except NoSuchObjectException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except InvalidOperationException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except MetaException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("drop_wm_pool", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_create_or_update_wm_mapping(self, seqid, iprot, oprot):
+ args = create_or_update_wm_mapping_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = create_or_update_wm_mapping_result()
+ try:
+ result.success = self._handler.create_or_update_wm_mapping(args.request)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except AlreadyExistsException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except NoSuchObjectException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except InvalidObjectException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except MetaException as o4:
+ msg_type = TMessageType.REPLY
+ result.o4 = o4
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("create_or_update_wm_mapping", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_drop_wm_mapping(self, seqid, iprot, oprot):
+ args = drop_wm_mapping_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = drop_wm_mapping_result()
+ try:
+ result.success = self._handler.drop_wm_mapping(args.request)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except NoSuchObjectException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except InvalidOperationException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except MetaException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("drop_wm_mapping", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_create_or_drop_wm_trigger_to_pool_mapping(self, seqid, iprot, oprot):
+ args = create_or_drop_wm_trigger_to_pool_mapping_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = create_or_drop_wm_trigger_to_pool_mapping_result()
+ try:
+ result.success = self._handler.create_or_drop_wm_trigger_to_pool_mapping(args.request)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except AlreadyExistsException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except NoSuchObjectException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except InvalidObjectException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except MetaException as o4:
+ msg_type = TMessageType.REPLY
+ result.o4 = o4
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
# HELPER FUNCTIONS AND STRUCTURES
@@ -31465,22 +31923,1276 @@ class get_functions_result:
def __ne__(self, other):
return not (self == other)
-class get_function_args:
+class get_function_args:
+ """
+ Attributes:
+ - dbName
+ - funcName
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'dbName', None, None, ), # 1
+ (2, TType.STRING, 'funcName', None, None, ), # 2
+ )
+
+ def __init__(self, dbName=None, funcName=None,):
+ self.dbName = dbName
+ self.funcName = funcName
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.dbName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.funcName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_function_args')
+ if self.dbName is not None:
+ oprot.writeFieldBegin('dbName', TType.STRING, 1)
+ oprot.writeString(self.dbName)
+ oprot.writeFieldEnd()
+ if self.funcName is not None:
+ oprot.writeFieldBegin('funcName', TType.STRING, 2)
+ oprot.writeString(self.funcName)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.dbName)
+ value = (value * 31) ^ hash(self.funcName)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_function_result:
+ """
+ Attributes:
+ - success
+ - o1
+ - o2
+ """
+
+ thrift_spec = (
+ (0, TType.STRUCT, 'success', (Function, Function.thrift_spec), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, success=None, o1=None, o2=None,):
+ self.success = success
+ self.o1 = o1
+ self.o2 = o2
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = Function()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = NoSuchObjectException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_function_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 is not None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ value = (value * 31) ^ hash(self.o2)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_all_functions_args:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_all_functions_args')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_all_functions_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for get_all_functions(): either a
+ # GetAllFunctionsResponse in `success` or a MetaException in `o1`.
+
+ thrift_spec = (
+ (0, TType.STRUCT, 'success', (GetAllFunctionsResponse, GetAllFunctionsResponse.thrift_spec), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = GetAllFunctionsResponse()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_all_functions_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class create_role_args:
+ """
+ Attributes:
+ - role
+ """
+ # Thrift-generated argument struct for create_role(Role role).
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'role', (Role, Role.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, role=None,):
+ self.role = role
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.role = Role()
+ self.role.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('create_role_args')
+ if self.role is not None:
+ oprot.writeFieldBegin('role', TType.STRUCT, 1)
+ self.role.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.role)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class create_role_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for create_role(): boolean `success` or MetaException `o1`.
+
+ thrift_spec = (
+ (0, TType.BOOL, 'success', None, None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.BOOL:
+ self.success = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('create_role_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.BOOL, 0)
+ oprot.writeBool(self.success)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class drop_role_args:
+ """
+ Attributes:
+ - role_name
+ """
+ # Thrift-generated argument struct for drop_role(string role_name).
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'role_name', None, None, ), # 1
+ )
+
+ def __init__(self, role_name=None,):
+ self.role_name = role_name
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.role_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('drop_role_args')
+ if self.role_name is not None:
+ oprot.writeFieldBegin('role_name', TType.STRING, 1)
+ oprot.writeString(self.role_name)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.role_name)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class drop_role_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for drop_role(): boolean `success` or MetaException `o1`.
+
+ thrift_spec = (
+ (0, TType.BOOL, 'success', None, None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.BOOL:
+ self.success = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('drop_role_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.BOOL, 0)
+ oprot.writeBool(self.success)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_role_names_args:
+ # Thrift-generated argument struct for get_role_names(); the call takes no parameters.
+
+ thrift_spec = (
+ )
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize the (empty) struct to oprot.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_role_names_args')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # Constant hash: the struct has no fields to fold in.
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_role_names_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for get_role_names(): list<string> `success` or MetaException `o1`.
+
+ thrift_spec = (
+ (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.LIST:
+ self.success = []
+ (_etype1120, _size1117) = iprot.readListBegin()
+ for _i1121 in xrange(_size1117):
+ _elem1122 = iprot.readString()
+ self.success.append(_elem1122)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_role_names_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.LIST, 0)
+ oprot.writeListBegin(TType.STRING, len(self.success))
+ for iter1123 in self.success:
+ oprot.writeString(iter1123)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values (hashing a list requires `success` be hashable when set).
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class grant_role_args:
+ """
+ Attributes:
+ - role_name
+ - principal_name
+ - principal_type
+ - grantor
+ - grantorType
+ - grant_option
+ """
+ # Thrift-generated argument struct for grant_role(role_name, principal_name,
+ # principal_type, grantor, grantorType, grant_option).
+ # principal_type and grantorType are I32-encoded enum values.
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'role_name', None, None, ), # 1
+ (2, TType.STRING, 'principal_name', None, None, ), # 2
+ (3, TType.I32, 'principal_type', None, None, ), # 3
+ (4, TType.STRING, 'grantor', None, None, ), # 4
+ (5, TType.I32, 'grantorType', None, None, ), # 5
+ (6, TType.BOOL, 'grant_option', None, None, ), # 6
+ )
+
+ def __init__(self, role_name=None, principal_name=None, principal_type=None, grantor=None, grantorType=None, grant_option=None,):
+ self.role_name = role_name
+ self.principal_name = principal_name
+ self.principal_type = principal_type
+ self.grantor = grantor
+ self.grantorType = grantorType
+ self.grant_option = grant_option
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.role_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.principal_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.principal_type = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.grantor = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I32:
+ self.grantorType = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.BOOL:
+ self.grant_option = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('grant_role_args')
+ if self.role_name is not None:
+ oprot.writeFieldBegin('role_name', TType.STRING, 1)
+ oprot.writeString(self.role_name)
+ oprot.writeFieldEnd()
+ if self.principal_name is not None:
+ oprot.writeFieldBegin('principal_name', TType.STRING, 2)
+ oprot.writeString(self.principal_name)
+ oprot.writeFieldEnd()
+ if self.principal_type is not None:
+ oprot.writeFieldBegin('principal_type', TType.I32, 3)
+ oprot.writeI32(self.principal_type)
+ oprot.writeFieldEnd()
+ if self.grantor is not None:
+ oprot.writeFieldBegin('grantor', TType.STRING, 4)
+ oprot.writeString(self.grantor)
+ oprot.writeFieldEnd()
+ if self.grantorType is not None:
+ oprot.writeFieldBegin('grantorType', TType.I32, 5)
+ oprot.writeI32(self.grantorType)
+ oprot.writeFieldEnd()
+ if self.grant_option is not None:
+ oprot.writeFieldBegin('grant_option', TType.BOOL, 6)
+ oprot.writeBool(self.grant_option)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.role_name)
+ value = (value * 31) ^ hash(self.principal_name)
+ value = (value * 31) ^ hash(self.principal_type)
+ value = (value * 31) ^ hash(self.grantor)
+ value = (value * 31) ^ hash(self.grantorType)
+ value = (value * 31) ^ hash(self.grant_option)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class grant_role_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for grant_role(): boolean `success` or MetaException `o1`.
+
+ thrift_spec = (
+ (0, TType.BOOL, 'success', None, None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.BOOL:
+ self.success = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('grant_role_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.BOOL, 0)
+ oprot.writeBool(self.success)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class revoke_role_args:
+ """
+ Attributes:
+ - role_name
+ - principal_name
+ - principal_type
+ """
+ # Thrift-generated argument struct for revoke_role(role_name, principal_name, principal_type).
+ # principal_type is an I32-encoded enum value.
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'role_name', None, None, ), # 1
+ (2, TType.STRING, 'principal_name', None, None, ), # 2
+ (3, TType.I32, 'principal_type', None, None, ), # 3
+ )
+
+ def __init__(self, role_name=None, principal_name=None, principal_type=None,):
+ self.role_name = role_name
+ self.principal_name = principal_name
+ self.principal_type = principal_type
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.role_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.principal_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.principal_type = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('revoke_role_args')
+ if self.role_name is not None:
+ oprot.writeFieldBegin('role_name', TType.STRING, 1)
+ oprot.writeString(self.role_name)
+ oprot.writeFieldEnd()
+ if self.principal_name is not None:
+ oprot.writeFieldBegin('principal_name', TType.STRING, 2)
+ oprot.writeString(self.principal_name)
+ oprot.writeFieldEnd()
+ if self.principal_type is not None:
+ oprot.writeFieldBegin('principal_type', TType.I32, 3)
+ oprot.writeI32(self.principal_type)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.role_name)
+ value = (value * 31) ^ hash(self.principal_name)
+ value = (value * 31) ^ hash(self.principal_type)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class revoke_role_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for revoke_role(): boolean `success` or MetaException `o1`.
+
+ thrift_spec = (
+ (0, TType.BOOL, 'success', None, None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.BOOL:
+ self.success = iprot.readBool()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('revoke_role_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.BOOL, 0)
+ oprot.writeBool(self.success)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class list_roles_args:
+ """
+ Attributes:
+ - principal_name
+ - principal_type
+ """
+ # Thrift-generated argument struct for list_roles(principal_name, principal_type).
+ # principal_type is an I32-encoded enum value.
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'principal_name', None, None, ), # 1
+ (2, TType.I32, 'principal_type', None, None, ), # 2
+ )
+
+ def __init__(self, principal_name=None, principal_type=None,):
+ self.principal_name = principal_name
+ self.principal_type = principal_type
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.principal_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.principal_type = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('list_roles_args')
+ if self.principal_name is not None:
+ oprot.writeFieldBegin('principal_name', TType.STRING, 1)
+ oprot.writeString(self.principal_name)
+ oprot.writeFieldEnd()
+ if self.principal_type is not None:
+ oprot.writeFieldBegin('principal_type', TType.I32, 2)
+ oprot.writeI32(self.principal_type)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values.
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.principal_name)
+ value = (value * 31) ^ hash(self.principal_type)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class list_roles_result:
+ """
+ Attributes:
+ - success
+ - o1
+ """
+ # Thrift-generated result struct for list_roles(): list<Role> `success` or MetaException `o1`.
+
+ thrift_spec = (
+ (0, TType.LIST, 'success', (TType.STRUCT,(Role, Role.thrift_spec)), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, success=None, o1=None,):
+ self.success = success
+ self.o1 = o1
+
+ # Deserialize from iprot; uses the C fastbinary path when the accelerated binary protocol is in use.
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.LIST:
+ self.success = []
+ (_etype1127, _size1124) = iprot.readListBegin()
+ for _i1128 in xrange(_size1124):
+ _elem1129 = Role()
+ _elem1129.read(iprot)
+ self.success.append(_elem1129)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ # Serialize to oprot; fields that are None are treated as unset and omitted.
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('list_roles_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.LIST, 0)
+ oprot.writeListBegin(TType.STRUCT, len(self.success))
+ for iter1130 in self.success:
+ iter1130.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ # 17/31 polynomial hash over the field values (hashing a list requires `success` be hashable when set).
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class grant_revoke_role_args:
"""
Attributes:
- - dbName
- - funcName
+ - request
"""
thrift_spec = (
None, # 0
- (1, TType.STRING, 'dbName', None, None, ), # 1
- (2, TType.STRING, 'funcName', None, None, ), # 2
+ (1, TType.STRUCT, 'request', (GrantRevokeRoleRequest, GrantRevokeRoleRequest.thrift_spec), None, ), # 1
)
- def __init__(self, dbName=None, funcName=None,):
- self.dbName = dbName
- self.funcName = funcName
+ def __init__(self, request=None,):
+ self.request = request
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -31492,13 +33204,9 @@ class get_function_args:
if ftype == TType.STOP:
break
if fid == 1:
- if ftype == TType.STRING:
- self.dbName = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- self.funcName = iprot.readString()
+ if ftype == TType.STRUCT:
+ self.request = GrantRevokeRoleRequest()
+ self.request.read(iprot)
else:
iprot.skip(ftype)
else:
@@ -31510,14 +33218,10 @@ class get_function_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('get_function_args')
- if self.dbName is not None:
- oprot.writeFieldBegin('dbName', TType.STRING, 1)
- oprot.writeString(self.dbName)
- oprot.writeFieldEnd()
- if self.funcName is not None:
- oprot.writeFieldBegin('funcName', TType.STRING, 2)
- oprot.writeString(self.funcName)
+ oprot.writeStructBegin('grant_revoke_role_args')
+ if self.request is not None:
+ oprot.writeFieldBegin('request', TType.STRUCT, 1)
+ self.request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31528,8 +33232,7 @@ class get_function_args:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.dbName)
- value = (value * 31) ^ hash(self.funcName)
+ value = (value * 31) ^ hash(self.request)
return value
def __repr__(self):
@@ -31543,24 +33246,21 @@ class get_function_args:
def __ne__(self, other):
return not (self == other)
-class get_function_result:
+class grant_revoke_role_result:
"""
Attributes:
- success
- o1
- - o2
"""
thrift_spec = (
- (0, TType.STRUCT, 'success', (Function, Function.thrift_spec), None, ), # 0
+ (0, TType.STRUCT, 'success', (GrantRevokeRoleResponse, GrantRevokeRoleResponse.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
- (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, o1=None, o2=None,):
+ def __init__(self, success=None, o1=None,):
self.success = success
self.o1 = o1
- self.o2 = o2
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -31573,7 +33273,7 @@ class get_function_result:
break
if fid == 0:
if ftype == TType.STRUCT:
- self.success = Function()
+ self.success = GrantRevokeRoleResponse()
self.success.read(iprot)
else:
iprot.skip(ftype)
@@ -31583,12 +33283,6 @@ class get_function_result:
self.o1.read(iprot)
else:
iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRUCT:
- self.o2 = NoSuchObjectException()
- self.o2.read(iprot)
- else:
- iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -31598,7 +33292,7 @@ class get_function_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('get_function_result')
+ oprot.writeStructBegin('grant_revoke_role_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
@@ -31607,10 +33301,6 @@ class get_function_result:
oprot.writeFieldBegin('o1', TType.STRUCT, 1)
self.o1.write(oprot)
oprot.writeFieldEnd()
- if self.o2 is not None:
- oprot.writeFieldBegin('o2', TType.STRUCT, 2)
- self.o2.write(oprot)
- oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31622,7 +33312,6 @@ class get_function_result:
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.o1)
- value = (value * 31) ^ hash(self.o2)
return value
def __repr__(self):
@@ -31636,11 +33325,20 @@ class get_function_result:
def __ne__(self, other):
return not (self == other)
-class get_all_functions_args:
+class get_principals_in_role_args:
+ """
+ Attributes:
+ - request
+ """
thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'request', (GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest.thrift_spec), None, ), # 1
)
+ def __init__(self, request=None,):
+ self.request = request
+
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
@@ -31650,6 +33348,12 @@ class get_all_functions_args:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.request = GetPrincipalsInRoleRequest()
+ self.request.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -31659,7 +33363,11 @@ class get_all_functions_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('get_all_functions_args')
+ oprot.writeStructBegin('get_principals_in_role_args')
+ if self.request is not None:
+ oprot.writeFieldBegin('request', TType.STRUCT, 1)
+ self.request.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31669,6 +33377,7 @@ class get_all_functions_args:
def __hash__(self):
value = 17
+ value = (value * 31) ^ hash(self.request)
return value
def __repr__(self):
@@ -31682,7 +33391,7 @@ class get_all_functions_args:
def __ne__(self, other):
return not (self == other)
-class get_all_functions_result:
+class get_principals_in_role_result:
"""
Attributes:
- success
@@ -31690,7 +33399,7 @@ class get_all_functions_result:
"""
thrift_spec = (
- (0, TType.STRUCT, 'success', (GetAllFunctionsResponse, GetAllFunctionsResponse.thrift_spec), None, ), # 0
+ (0, TType.STRUCT, 'success', (GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
)
@@ -31709,7 +33418,7 @@ class get_all_functions_result:
break
if fid == 0:
if ftype == TType.STRUCT:
- self.success = GetAllFunctionsResponse()
+ self.success = GetPrincipalsInRoleResponse()
self.success.read(iprot)
else:
iprot.skip(ftype)
@@ -31728,7 +33437,7 @@ class get_all_functions_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('get_all_functions_result')
+ oprot.writeStructBegin('get_principals_in_role_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
@@ -31761,19 +33470,19 @@ class get_all_functions_result:
def __ne__(self, other):
return not (self == other)
-class create_role_args:
+class get_role_grants_for_principal_args:
"""
Attributes:
- - role
+ - request
"""
thrift_spec = (
None, # 0
- (1, TType.STRUCT, 'role', (Role, Role.thrift_spec), None, ), # 1
+ (1, TType.STRUCT, 'request', (GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest.thrift_spec), None, ), # 1
)
- def __init__(self, role=None,):
- self.role = role
+ def __init__(self, request=None,):
+ self.request = request
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -31786,8 +33495,8 @@ class create_role_args:
break
if fid == 1:
if ftype == TType.STRUCT:
- self.role = Role()
- self.role.read(iprot)
+ self.request = GetRoleGrantsForPrincipalRequest()
+ self.request.read(iprot)
else:
iprot.skip(ftype)
else:
@@ -31799,10 +33508,10 @@ class create_role_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('create_role_args')
- if self.role is not None:
- oprot.writeFieldBegin('role', TType.STRUCT, 1)
- self.role.write(oprot)
+ oprot.writeStructBegin('get_role_grants_for_principal_args')
+ if self.request is not None:
+ oprot.writeFieldBegin('request', TType.STRUCT, 1)
+ self.request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31813,7 +33522,7 @@ class create_role_args:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.role)
+ value = (value * 31) ^ hash(self.request)
return value
def __repr__(self):
@@ -31827,7 +33536,7 @@ class create_role_args:
def __ne__(self, other):
return not (self == other)
-class create_role_result:
+class get_role_grants_for_principal_result:
"""
Attributes:
- success
@@ -31835,7 +33544,7 @@ class create_role_result:
"""
thrift_spec = (
- (0, TType.BOOL, 'success', None, None, ), # 0
+ (0, TType.STRUCT, 'success', (GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
)
@@ -31853,8 +33562,9 @@ class create_role_result:
if ftype == TType.STOP:
break
if fid == 0:
- if ftype == TType.BOOL:
- self.success = iprot.readBool()
+ if ftype == TType.STRUCT:
+ self.success = GetRoleGrantsForPrincipalResponse()
+ self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
@@ -31872,10 +33582,10 @@ class create_role_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('create_role_result')
+ oprot.writeStructBegin('get_role_grants_for_principal_result')
if self.success is not None:
- oprot.writeFieldBegin('success', TType.BOOL, 0)
- oprot.writeBool(self.success)
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
oprot.writeFieldEnd()
if self.o1 is not None:
oprot.writeFieldBegin('o1', TType.STRUCT, 1)
@@ -31905,19 +33615,25 @@ class create_role_result:
def __ne__(self, other):
return not (self == other)
-class drop_role_args:
+class get_privilege_set_args:
"""
Attributes:
- - role_name
+ - hiveObject
+ - user_name
+ - group_names
"""
thrift_spec = (
None, # 0
- (1, TType.STRING, 'role_name', None, None, ), # 1
+ (1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 1
+ (2, TType.STRING, 'user_name', None, None, ), # 2
+ (3, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 3
)
- def __init__(self, role_name=None,):
- self.role_name = role_name
+ def __init__(self, hiveObject=None, user_name=None, group_names=None,):
+ self.hiveObject = hiveObject
+ self.user_name = user_name
+ self.group_names = group_names
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -31929,8 +33645,24 @@ class drop_role_args:
if ftype == TType.STOP:
break
if fid == 1:
+ if ftype == TType.STRUCT:
+ self.hiveObject = HiveObjectRef()
+ self.hiveObject.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
if ftype == TType.STRING:
- self.role_name = iprot.readString()
+ self.user_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.LIST:
+ self.group_names = []
+ (_etype1134, _size1131) = iprot.readListBegin()
+ for _i1135 in xrange(_size1131):
+ _elem1136 = iprot.readString()
+ self.group_names.append(_elem1136)
+ iprot.readListEnd()
else:
iprot.skip(ftype)
else:
@@ -31942,10 +33674,21 @@ class drop_role_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('drop_role_args')
- if self.role_name is not None:
- oprot.writeFieldBegin('role_name', TType.STRING, 1)
- oprot.writeString(self.role_name)
+ oprot.writeStructBegin('get_privilege_set_args')
+ if self.hiveObject is not None:
+ oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1)
+ self.hiveObject.write(oprot)
+ oprot.writeFieldEnd()
+ if self.user_name is not None:
+ oprot.writeFieldBegin('user_name', TType.STRING, 2)
+ oprot.writeString(self.user_name)
+ oprot.writeFieldEnd()
+ if self.group_names is not None:
+ oprot.writeFieldBegin('group_names', TType.LIST, 3)
+ oprot.writeListBegin(TType.STRING, len(self.group_names))
+ for iter1137 in self.group_names:
+ oprot.writeString(iter1137)
+ oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -31956,7 +33699,9 @@ class drop_role_args:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.role_name)
+ value = (value * 31) ^ hash(self.hiveObject)
+ value = (value * 31) ^ hash(self.user_name)
+ value = (value * 31) ^ hash(self.group_names)
return value
def __repr__(self):
@@ -31970,7 +33715,7 @@ class drop_role_args:
def __ne__(self, other):
return not (self == other)
-class drop_role_result:
+class get_privilege_set_result:
"""
Attributes:
- success
@@ -31978,7 +33723,7 @@ class drop_role_result:
"""
thrift_spec = (
- (0, TType.BOOL, 'success', None, None, ), # 0
+ (0, TType.STRUCT, 'success', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
)
@@ -31996,8 +33741,9 @@ class drop_role_result:
if ftype == TType.STOP:
break
if fid == 0:
- if ftype == TType.BOOL:
- self.success = iprot.readBool()
+ if ftype == TType.STRUCT:
+ self.success = PrincipalPrivilegeSet()
+ self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
@@ -32015,10 +33761,10 @@ class drop_role_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('drop_role_result')
+ oprot.writeStructBegin('get_privilege_set_result')
if self.success is not None:
- oprot.writeFieldBegin('success', TType.BOOL, 0)
- oprot.writeBool(self.success)
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
oprot.writeFieldEnd()
if self.o1 is not None:
oprot.writeFieldBegin('o1', TType.STRUCT, 1)
@@ -32048,11 +33794,26 @@ class drop_role_result:
def __ne__(self, other):
return not (self == other)
-class get_role_names_args:
+class list_privileges_args:
+ """
+ Attributes:
+ - principal_name
+ - principal_type
+ - hiveObject
+ """
thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'principal_name', None, None, ), # 1
+ (2, TType.I32, 'principal_type', None, None, ), # 2
+ (3, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 3
)
+ def __init__(self, principal_name=None, principal_type=None, hiveObject=None,):
+ self.principal_name = principal_name
+ self.principal_type = principal_type
+ self.hiveObject = hiveObject
+
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
@@ -32062,6 +33823,22 @@ class get_role_names_args:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.principal_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.principal_type = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.hiveObject = HiveObjectRef()
+ self.hiveObject.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -32071,7 +33848,19 @@ class get_role_names_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('get_role_names_args')
+ oprot.writeStructBegin('list_privileges_args')
+ if self.principal_name is not None:
+ oprot.writeFieldBegin('principal_name', TType.STRING, 1)
+ oprot.writeString(self.principal_name)
+ oprot.writeFieldEnd()
+ if self.principal_type is not None:
+ oprot.writeFieldBegin('principal_type', TType.I32, 2)
+ oprot.writeI32(self.principal_type)
+ oprot.writeFieldEnd()
+ if self.hiveObject is not None:
+ oprot.writeFieldBegin('hiveObject', TType.STRUCT, 3)
+ self.hiveObject.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -32081,6 +33870,9 @@ class get_role_names_args:
def __hash__(self):
value = 17
+ value = (value * 31) ^ hash(self.principal_name)
+ value = (value * 31) ^ hash(self.principal_type)
+ value = (value * 31) ^ hash(self.hiveObject)
return value
def __repr__(self):
@@ -32094,7 +33886,7 @@ class get_role_names_args:
def __ne__(self, other):
return not (self == other)
-class get_role_names_result:
+class list_privileges_result:
"""
Attributes:
- success
@@ -32102,7 +33894,7 @@ class get_role_names_result:
"""
thrift_spec = (
- (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+ (0, TType.LIST, 'success', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
)
@@ -32122,10 +33914,11 @@ class get_role_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1120, _size1117) = iprot.readListBegin()
- for _i1121 in xrange(_size1117):
- _elem1122 = iprot.readString()
- self.success.append(_elem1122)
+ (_etype1141, _size1138) = iprot.readListBegin()
+ for _i1142 in xrange(_size1138):
+ _elem1143 = HiveObjectPrivilege()
+ _elem1143.read(iprot)
+ self.success.append(_elem1143)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -32144,12 +33937,12 @@ class get_role_names_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('get_role_names_result')
+ oprot.writeStructBegin('list_privileges_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
- oprot.writeListBegin(TType.STRING, len(self.success))
- for iter1123 in self.success:
- oprot.writeString(iter1123)
+ oprot.writeListBegin(TType.STRUCT, len(self.success))
+ for iter1144 in self.success:
+ iter1144.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -32180,34 +33973,19 @@ class get_role_names_result:
def __ne__(self, other):
return not (self == other)
-class grant_role_args:
+class grant_privileges_args:
"""
Attributes:
- - role_name
- - principal_name
- - principal_type
- - grantor
- - grantorType
- - grant_option
+ - privileges
"""
thrift_spec = (
None, # 0
- (1, TType.STRING, 'role_name', None, None, ), # 1
- (2, TType.STRING, 'principal_name', None, None, ), # 2
- (3, TType.I32, 'principal_type', None, None, ), # 3
- (4, TType.STRING, 'grantor', None, None, ), # 4
- (5, TType.I32, 'grantorType', None, None, ), # 5
- (6, TType.BOOL, 'grant_option', None, None, ), # 6
+ (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1
)
- def __init__(self, role_name=None, principal_name=None, principal_type=None, grantor=None, grantorType=None, grant_option=None,):
- self.role_name = role_name
- self.principal_name = principal_name
- self.principal_type = principal_type
- self.grantor = grantor
- self.grantorType = grantorType
- self.grant_option = grant_option
+ def __init__(self, privileges=None,):
+ self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -32219,33 +33997,9 @@ class grant_role_args:
if ftype == TType.STOP:
break
if fid == 1:
- if ftype == TType.STRING:
- self.role_name = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- self.principal_name = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.I32:
- self.principal_type = iprot.readI32()
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- self.grantor = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.I32:
- self.grantorType = iprot.readI32()
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.BOOL:
- self.grant_option = iprot.readBool()
+ if ftype == TType.STRUCT:
+ self.privileges = PrivilegeBag()
+ self.privileges.read(iprot)
else:
iprot.skip(ftype)
else:
@@ -32257,30 +34011,10 @@ class grant_role_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('grant_role_args')
- if self.role_name is not None:
- oprot.writeFieldBegin('role_name', TType.STRING, 1)
- oprot.writeString(self.role_name)
- oprot.writeFieldEnd()
- if self.principal_name is not None:
- oprot.writeFieldBegin('principal_name', TType.STRING, 2)
- oprot.writeString(self.principal_name)
- oprot.writeFieldEnd()
- if self.principal_type is not None:
- oprot.writeFieldBegin('principal_type', TType.I32, 3)
- oprot.writeI32(self.principal_type)
- oprot.writeFieldEnd()
- if self.grantor is not None:
- oprot.writeFieldBegin('grantor', TType.STRING, 4)
- oprot.writeString(self.grantor)
- oprot.writeFieldEnd()
- if self.grantorType is not None:
- oprot.writeFieldBegin('grantorType', TType.I32, 5)
- oprot.writeI32(self.grantorType)
- oprot.writeFieldEnd()
- if self.grant_option is not None:
- oprot.writeFieldBegin('grant_option', TType.BOOL, 6)
- oprot.writeBool(self.grant_option)
+ oprot.writeStructBegin('grant_privileges_args')
+ if self.privileges is not None:
+ oprot.writeFieldBegin('privileges', TType.STRUCT, 1)
+ self.privileges.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -32291,12 +34025,7 @@ class grant_role_args:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.role_name)
- value = (value * 31) ^ hash(self.principal_name)
- value = (value * 31) ^ hash(self.principal_type)
- value = (value * 31) ^ hash(self.grantor)
- value = (value * 31) ^ hash(self.grantorType)
- value = (value * 31) ^ hash(self.grant_option)
+ value = (value * 31) ^ hash(self.privileges)
return value
def __repr__(self):
@@ -32310,7 +34039,7 @@ class grant_role_args:
def __ne__(self, other):
return not (self == other)
-class grant_role_result:
+class grant_privileges_result:
"""
Attributes:
- success
@@ -32355,7 +34084,7 @@ class grant_role_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('grant_role_result')
+ oprot.writeStructBegin('grant_privileges_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
@@ -32388,25 +34117,19 @@ class grant_role_result:
def __ne__(self, other):
return not (self == other)
-class revoke_role_args:
+class revoke_privileges_args:
"""
Attributes:
- - role_name
- - principal_name
- - principal_type
+ - privileges
"""
thrift_spec = (
None, # 0
- (1, TType.STRING, 'role_name', None, None, ), # 1
- (2, TType.STRING, 'principal_name', None, None, ), # 2
- (3, TType.I32, 'principal_type', None, None, ), # 3
+ (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1
)
- def __init__(self, role_name=None, principal_name=None, principal_type=None,):
- self.role_name = role_name
- self.principal_name = principal_name
- self.principal_type = principal_type
+ def __init__(self, privileges=None,):
+ self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -32418,18 +34141,9 @@ class revoke_role_args:
if ftype == TType.STOP:
break
if fid == 1:
- if ftype == TType.STRING:
- self.role_name = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- self.principal_name = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.I32:
- self.principal_type = iprot.readI32()
+ if ftype == TType.STRUCT:
+ self.privileges = PrivilegeBag()
+ self.privileges.read(iprot)
else:
iprot.skip(ftype)
else:
@@ -32441,18 +34155,10 @@ class revoke_role_args:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('revoke_role_args')
- if self.role_name is not None:
- oprot.writeFieldBegin('role_name', TType.STRING, 1)
- oprot.writeString(self.role_name)
- oprot.writeFieldEnd()
- if self.principal_name is not None:
- oprot.writeFieldBegin('principal_name', TType.STRING, 2)
- oprot.writeString(self.principal_name)
- oprot.writeFieldEnd()
- if self.principal_type is not None:
- oprot.writeFieldBegin('principal_type', TType.I32, 3)
- oprot.writeI32(self.principal_type)
+ oprot.writeStructBegin('revoke_privileges_args')
+ if self.privileges is not None:
+ oprot.writeFieldBegin('privileges', TType.STRUCT, 1)
+ self.privileges.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -32463,9 +34169,7 @@ class revoke_role_args:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.role_name)
- value = (value * 31) ^ hash(self.principal_name)
- value = (value * 31) ^ hash(self.principal_type)
+ value = (value * 31) ^ hash(self.privileges)
return value
def __repr__(self):
@@ -32479,7 +34183,7 @@ class revoke_role_args:
def __ne__(self, other):
return not (self == other)
-class revoke_role_result:
+class revoke_privileges_result:
"""
Attributes:
- success
@@ -32524,7 +34228,7 @@ class revoke_role_result:
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('revoke_role_result')
+ oprot.writeStructBegin('revoke_privileges_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
@@ -32557,22 +34261,19 @@ class revoke_role_result:
def __ne__(self, other):
return not (self == other)
-class list_roles_args:
+class grant_revoke_privileges_args:
"""
Attributes:
- - principal_name
- - principal_type
+ - request
"""
thrift_spec = (
None, # 0
- (1, TType.STRING, 'principal_name', None, None, ), # 1
- (2, TType.I32, 'principal_type', None, None, ), # 2
+ (1, TType.STRUCT, 'request', (GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest.thrift_spec), None, ), # 1
)
- def __init__(self, principal_name=None, principal_type=None,):
- self.principal_name = principal_nam
<TRUNCATED>
[12/13] hive git commit: HIVE-17954 : Implement pool, user,
group and trigger to pool management API's (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
index b6298da..43e6e33 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
@@ -20,92 +20,51 @@ package org.apache.hadoop.hive.ql.plan;
import java.io.Serializable;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
-@Explain(displayName = "Alter Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+@Explain(displayName = "Alter Resource plans",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public class AlterResourcePlanDesc extends DDLDesc implements Serializable {
private static final long serialVersionUID = -3514685833183437279L;
+ private WMResourcePlan resourcePlan;
private String rpName;
- private String newName;
- private Integer queryParallelism;
- private WMResourcePlanStatus status;
private boolean validate;
- private String defaultPoolPath;
private boolean isEnableActivate;
public AlterResourcePlanDesc() {}
- private AlterResourcePlanDesc(String rpName, String newName, Integer queryParallelism,
- WMResourcePlanStatus status, boolean validate, String defaultPoolPath) {
+ public AlterResourcePlanDesc(WMResourcePlan resourcePlan, String rpName, boolean validate,
+ boolean isEnableActivate) {
+ this.resourcePlan = resourcePlan;
this.rpName = rpName;
- this.newName = newName;
- this.queryParallelism = queryParallelism;
- this.status = status;
this.validate = validate;
- this.defaultPoolPath = defaultPoolPath;
+ this.isEnableActivate = isEnableActivate;
}
- public static AlterResourcePlanDesc createSet(String rpName) {
- return new AlterResourcePlanDesc(rpName, null, null, null, false, null);
+ @Explain(displayName="resourcePlan",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public WMResourcePlan getResourcePlan() {
+ return resourcePlan;
}
- public static AlterResourcePlanDesc createChangeStatus(
- String rpName, WMResourcePlanStatus status) {
- return new AlterResourcePlanDesc(rpName, null, null, status, false, null);
+ public void setResourcePlan(WMResourcePlan resourcePlan) {
+ this.resourcePlan = resourcePlan;
}
- public static AlterResourcePlanDesc createValidatePlan(String rpName) {
- return new AlterResourcePlanDesc(rpName, null, null, null, true, null);
- }
-
- @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getRpName() {
+ @Explain(displayName="resourcePlanName",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public String getResourcePlanName() {
return rpName;
}
- public void setRpName(String rpName) {
+ public void setResourcePlanName(String rpName) {
this.rpName = rpName;
}
- @Explain(displayName="newResourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getNewName() {
- return newName;
- }
-
- public void setNewName(String newName) {
- this.newName = newName;
- }
-
- @Explain(displayName="Default pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getDefaultPoolPath() {
- return defaultPoolPath;
- }
-
- public void setDefaultPoolPath(String defaultPoolPath) {
- this.defaultPoolPath = defaultPoolPath;
- }
-
- @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public Integer getQueryParallelism() {
- return queryParallelism;
- }
-
- public void setQueryParallelism(Integer queryParallelism) {
- this.queryParallelism = queryParallelism;
- }
-
- @Explain(displayName="status", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public WMResourcePlanStatus getStatus() {
- return status;
- }
-
- public void setStatus(WMResourcePlanStatus status) {
- this.status = status;
- }
-
- @Explain(displayName="shouldValidate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ @Explain(displayName="shouldValidate",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public boolean shouldValidate() {
return validate;
}
@@ -114,11 +73,11 @@ public class AlterResourcePlanDesc extends DDLDesc implements Serializable {
this.validate = validate;
}
- public void setIsEnableActivate(boolean b) {
- this.isEnableActivate = b;
- }
-
public boolean isEnableActivate() {
return isEnableActivate;
}
+
+ public void setIsEnableActivate(boolean b) {
+ this.isEnableActivate = b;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java
index 94414ef..11d448b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterWMTriggerDesc.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.plan;
import java.io.Serializable;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
@Explain(displayName="Alter WM Trigger",
@@ -27,58 +28,21 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
public class AlterWMTriggerDesc extends DDLDesc implements Serializable {
private static final long serialVersionUID = -2105736261687539210L;
- private String rpName;
- private String triggerName;
- private String triggerExpression;
- private String actionExpression;
+ private WMTrigger trigger;
public AlterWMTriggerDesc() {}
- public AlterWMTriggerDesc(String rpName, String triggerName, String triggerExpression,
- String actionExpression) {
- this.rpName = rpName;
- this.triggerName = triggerName;
- this.triggerExpression = triggerExpression;
- this.actionExpression = actionExpression;
+ public AlterWMTriggerDesc(WMTrigger trigger) {
+ this.trigger = trigger;
}
- @Explain(displayName="resourcePlanName",
+ @Explain(displayName="trigger",
explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getRpName() {
- return rpName;
+ public WMTrigger getTrigger() {
+ return trigger;
}
- public void setRpName(String rpName) {
- this.rpName = rpName;
- }
-
- @Explain(displayName="triggerName",
- explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getTriggerName() {
- return triggerName;
- }
-
- public void setTriggerName(String triggerName) {
- this.triggerName = triggerName;
- }
-
- @Explain(displayName="triggerExpression",
- explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getTriggerExpression() {
- return triggerExpression;
- }
-
- public void setTriggerExpression(String triggerExpression) {
- this.triggerExpression = triggerExpression;
- }
-
- @Explain(displayName="actionExpression",
- explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getActionExpression() {
- return actionExpression;
- }
-
- public void setActionExpression(String actionExpression) {
- this.actionExpression = actionExpression;
+ public void setTrigger(WMTrigger trigger) {
+ this.trigger = trigger;
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java
new file mode 100644
index 0000000..3d5c4a5
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java
@@ -0,0 +1,41 @@
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+@Explain(displayName = "Create/Alter Mapping",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class CreateOrAlterWMMappingDesc extends DDLDesc implements Serializable {
+ private static final long serialVersionUID = -442968568922083053L;
+
+ private WMMapping mapping;
+ private boolean update;
+
+ public CreateOrAlterWMMappingDesc() {}
+
+ public CreateOrAlterWMMappingDesc(WMMapping mapping, boolean update) {
+ this.mapping = mapping;
+ this.update = update;
+ }
+
+ @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public WMMapping getMapping() {
+ return mapping;
+ }
+
+ public void setMapping(WMMapping mapping) {
+ this.mapping = mapping;
+ }
+
+ @Explain(displayName = "update",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public boolean isUpdate() {
+ return update;
+ }
+
+ public void setUpdate(boolean update) {
+ this.update = update;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java
new file mode 100644
index 0000000..b6aa3f1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java
@@ -0,0 +1,50 @@
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+@Explain(displayName = "Create/Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class CreateOrAlterWMPoolDesc extends DDLDesc implements Serializable {
+ private static final long serialVersionUID = 4872940135771213510L;
+
+ private WMPool pool;
+ private String poolPath;
+ private boolean update;
+
+ public CreateOrAlterWMPoolDesc() {}
+
+ public CreateOrAlterWMPoolDesc(WMPool pool, String poolPath, boolean update) {
+ this.pool = pool;
+ this.poolPath = poolPath;
+ this.update = update;
+ }
+
+ @Explain(displayName="pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public WMPool getPool() {
+ return pool;
+ }
+
+ public void setPool(WMPool pool) {
+ this.pool = pool;
+ }
+
+ @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public String getPoolPath() {
+ return poolPath;
+ }
+
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
+ }
+
+ @Explain(displayName="isUpdate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public boolean isUpdate() {
+ return update;
+ }
+
+ public void setUpdate(boolean update) {
+ this.update = update;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java
new file mode 100644
index 0000000..e1f912f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java
@@ -0,0 +1,66 @@
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+@Explain(displayName = "Create/Drop Trigger to pool mappings",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class CreateOrDropTriggerToPoolMappingDesc extends DDLDesc implements Serializable {
+ private static final long serialVersionUID = 383046258694558029L;
+
+ private String resourcePlanName;
+ private String triggerName;
+ private String poolPath;
+ private boolean drop;
+
+ public CreateOrDropTriggerToPoolMappingDesc() {}
+
+ public CreateOrDropTriggerToPoolMappingDesc(String resourcePlanName, String triggerName,
+ String poolPath, boolean drop) {
+ this.resourcePlanName = resourcePlanName;
+ this.triggerName = triggerName;
+ this.poolPath = poolPath;
+ this.drop = drop;
+ }
+
+ @Explain(displayName = "resourcePlanName",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public String getResourcePlanName() {
+ return resourcePlanName;
+ }
+
+ public void setResourcePlanName(String resourcePlanName) {
+ this.resourcePlanName = resourcePlanName;
+ }
+
+ @Explain(displayName = "triggerName",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public String getTriggerName() {
+ return triggerName;
+ }
+
+ public void setTriggerName(String triggerName) {
+ this.triggerName = triggerName;
+ }
+
+ @Explain(displayName = "poolPath",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public String getPoolPath() {
+ return poolPath;
+ }
+
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
+ }
+
+ @Explain(displayName = "drop or create",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public boolean shouldDrop() {
+ return drop;
+ }
+
+ public void setDrop(boolean drop) {
+ this.drop = drop;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java
index 348e315..efdd05c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java
@@ -20,32 +20,28 @@ package org.apache.hadoop.hive.ql.plan;
import java.io.Serializable;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
@Explain(displayName = "Create ResourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public class CreateResourcePlanDesc extends DDLDesc implements Serializable {
+ private static final long serialVersionUID = -3492803425541479414L;
- private static final long serialVersionUID = -3649343104271794404L;
-
- private String planName;
- private Integer queryParallelism;
+ private WMResourcePlan resourcePlan;
// For serialization only.
public CreateResourcePlanDesc() {
}
public CreateResourcePlanDesc(String planName, Integer queryParallelism) {
- this.planName = planName;
- this.queryParallelism = queryParallelism;
- }
-
- @Explain(displayName="name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getName() {
- return planName;
+ resourcePlan = new WMResourcePlan(planName);
+ if (queryParallelism != null) {
+ resourcePlan.setQueryParallelism(queryParallelism);
+ }
}
- @Explain(displayName="queryParallelism")
- public Integer getQueryParallelism() {
- return queryParallelism;
+ @Explain(displayName="resourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public WMResourcePlan getResourcePlan() {
+ return resourcePlan;
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java
index 92eaefd..c1dcb3c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateWMTriggerDesc.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.plan;
import java.io.Serializable;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
@Explain(displayName="Create WM Trigger",
@@ -27,58 +28,21 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
public class CreateWMTriggerDesc extends DDLDesc implements Serializable {
private static final long serialVersionUID = 1705317739121300923L;
- private String rpName;
- private String triggerName;
- private String triggerExpression;
- private String actionExpression;
+ private WMTrigger trigger;
public CreateWMTriggerDesc() {}
- public CreateWMTriggerDesc(String rpName, String triggerName, String triggerExpression,
- String actionExpression) {
- this.rpName = rpName;
- this.triggerName = triggerName;
- this.triggerExpression = triggerExpression;
- this.actionExpression = actionExpression;
+ public CreateWMTriggerDesc(WMTrigger trigger) {
+ this.trigger = trigger;
}
- @Explain(displayName="resourcePlanName",
+ @Explain(displayName="trigger",
explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getRpName() {
- return rpName;
+ public WMTrigger getTrigger() {
+ return trigger;
}
- public void setRpName(String rpName) {
- this.rpName = rpName;
- }
-
- @Explain(displayName="triggerName",
- explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getTriggerName() {
- return triggerName;
- }
-
- public void setTriggerName(String triggerName) {
- this.triggerName = triggerName;
- }
-
- @Explain(displayName="triggerExpression",
- explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getTriggerExpression() {
- return triggerExpression;
- }
-
- public void setTriggerExpression(String triggerExpression) {
- this.triggerExpression = triggerExpression;
- }
-
- @Explain(displayName="actionExpression",
- explainLevels={ Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getActionExpression() {
- return actionExpression;
- }
-
- public void setActionExpression(String actionExpression) {
- this.actionExpression = actionExpression;
+ public void setTrigger(WMTrigger trigger) {
+ this.trigger = trigger;
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index a9b39be..eb19ab0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -96,6 +96,14 @@ public class DDLWork implements Serializable {
private AlterWMTriggerDesc alterWMTriggerDesc;
private DropWMTriggerDesc dropWMTriggerDesc;
+ private CreateOrAlterWMPoolDesc wmPoolDesc;
+ private DropWMPoolDesc dropWMPoolDesc;
+
+ private CreateOrAlterWMMappingDesc wmMappingDesc;
+ private DropWMMappingDesc dropWMMappingDesc;
+
+ private CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc;
+
boolean needLock = false;
/**
@@ -582,31 +590,61 @@ public class DDLWork implements Serializable {
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
DropResourcePlanDesc dropResourcePlanDesc) {
this(inputs, outputs);
- this.setDropResourcePlanDesc(dropResourcePlanDesc);
+ this.dropResourcePlanDesc = dropResourcePlanDesc;
}
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
AlterResourcePlanDesc alterResourcePlanDesc) {
this(inputs, outputs);
- this.setAlterResourcePlanDesc(alterResourcePlanDesc);
+ this.alterResourcePlanDesc = alterResourcePlanDesc;
}
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
CreateWMTriggerDesc createWMTriggerDesc) {
this(inputs, outputs);
- this.setCreateWMTriggerDesc(createWMTriggerDesc);
+ this.createWMTriggerDesc = createWMTriggerDesc;
}
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
AlterWMTriggerDesc alterWMTriggerDesc) {
this(inputs, outputs);
- this.setAlterWMTriggerDesc(alterWMTriggerDesc);
+ this.alterWMTriggerDesc = alterWMTriggerDesc;
}
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
DropWMTriggerDesc dropWMTriggerDesc) {
this(inputs, outputs);
- this.setDropWMTriggerDesc(dropWMTriggerDesc);
+ this.dropWMTriggerDesc = dropWMTriggerDesc;
+ }
+
+ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+ CreateOrAlterWMPoolDesc wmPoolDesc) {
+ this(inputs, outputs);
+ this.wmPoolDesc = wmPoolDesc;
+ }
+
+ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+ DropWMPoolDesc dropWMPoolDesc) {
+ this(inputs, outputs);
+ this.dropWMPoolDesc = dropWMPoolDesc;
+ }
+
+ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+ CreateOrAlterWMMappingDesc wmMappingDesc) {
+ this(inputs, outputs);
+ this.wmMappingDesc = wmMappingDesc;
+ }
+
+ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+ DropWMMappingDesc dropWMMappingDesc) {
+ this(inputs, outputs);
+ this.dropWMMappingDesc = dropWMMappingDesc;
+ }
+
+ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+ CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) {
+ this(inputs, outputs);
+ this.triggerToPoolMappingDesc = triggerToPoolMappingDesc;
}
/**
@@ -1363,4 +1401,44 @@ public class DDLWork implements Serializable {
public void setDropWMTriggerDesc(DropWMTriggerDesc dropWMTriggerDesc) {
this.dropWMTriggerDesc = dropWMTriggerDesc;
}
+
+ public CreateOrAlterWMPoolDesc getWmPoolDesc() {
+ return wmPoolDesc;
+ }
+
+ public void setWmPoolDesc(CreateOrAlterWMPoolDesc wmPoolDesc) {
+ this.wmPoolDesc = wmPoolDesc;
+ }
+
+ public DropWMPoolDesc getDropWMPoolDesc() {
+ return dropWMPoolDesc;
+ }
+
+ public void setDropWMPoolDesc(DropWMPoolDesc dropWMPoolDesc) {
+ this.dropWMPoolDesc = dropWMPoolDesc;
+ }
+
+ public CreateOrAlterWMMappingDesc getWmMappingDesc() {
+ return wmMappingDesc;
+ }
+
+ public void setWmMappingDesc(CreateOrAlterWMMappingDesc wmMappingDesc) {
+ this.wmMappingDesc = wmMappingDesc;
+ }
+
+ public DropWMMappingDesc getDropWMMappingDesc() {
+ return dropWMMappingDesc;
+ }
+
+ public void setDropWMMappingDesc(DropWMMappingDesc dropWMMappingDesc) {
+ this.dropWMMappingDesc = dropWMMappingDesc;
+ }
+
+ public CreateOrDropTriggerToPoolMappingDesc getTriggerToPoolMappingDesc() {
+ return triggerToPoolMappingDesc;
+ }
+
+ public void setTriggerToPoolMappingDesc(CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) {
+ this.triggerToPoolMappingDesc = triggerToPoolMappingDesc;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java
new file mode 100644
index 0000000..56b81ca
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java
@@ -0,0 +1,29 @@
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+@Explain(displayName = "Drop WM Mapping",
+ explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class DropWMMappingDesc extends DDLDesc implements Serializable {
+ private static final long serialVersionUID = -1567558687529244218L;
+
+ private WMMapping mapping;
+
+ public DropWMMappingDesc() {}
+
+ public DropWMMappingDesc(WMMapping mapping) {
+ this.mapping = mapping;
+ }
+
+ @Explain(displayName = "mapping", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public WMMapping getMapping() {
+ return mapping;
+ }
+
+ public void setMapping(WMMapping mapping) {
+ this.mapping = mapping;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java
new file mode 100644
index 0000000..ff1bedd
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java
@@ -0,0 +1,33 @@
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+public class DropWMPoolDesc extends DDLDesc implements Serializable {
+ private static final long serialVersionUID = -2608462103392563252L;
+
+ private String resourcePlanName;
+ private String poolPath;
+
+ public DropWMPoolDesc() {}
+
+ public DropWMPoolDesc(String resourcePlanName, String poolPath) {
+ this.resourcePlanName = resourcePlanName;
+ this.poolPath = poolPath;
+ }
+
+ public String getResourcePlanName() {
+ return resourcePlanName;
+ }
+
+ public void setResourcePlanName(String resourcePlanName) {
+ this.resourcePlanName = resourcePlanName;
+ }
+
+ public String getPoolPath() {
+ return poolPath;
+ }
+
+ public void setPoolPath(String poolPath) {
+ this.poolPath = poolPath;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 3fb1c26..32a24e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -149,7 +149,14 @@ public enum HiveOperation {
DROP_RESOURCEPLAN("DROP RESOURCEPLAN", null, null, false, false),
CREATE_TRIGGER("CREATE TRIGGER", null, null, false, false),
ALTER_TRIGGER("ALTER TRIGGER", null, null, false, false),
- DROP_TRIGGER("DROP TRIGGER", null, null, false, false);
+ DROP_TRIGGER("DROP TRIGGER", null, null, false, false),
+ CREATE_POOL("CREATE POOL", null, null, false, false),
+ ALTER_POOL("ALTER POOL", null, null, false, false),
+ DROP_POOL("DROP POOL", null, null, false, false),
+ CREATE_MAPPING("CREATE MAPPING", null, null, false, false),
+ ALTER_MAPPING("ALTER MAPPING", null, null, false, false),
+ DROP_MAPPING("DROP MAPPING", null, null, false, false);
+
private String operationName;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
index dac4471..d3aad3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
@@ -141,6 +141,12 @@ public enum HiveOperationType {
CREATE_TRIGGER,
ALTER_TRIGGER,
DROP_TRIGGER,
+ CREATE_POOL,
+ ALTER_POOL,
+ DROP_POOL,
+ CREATE_MAPPING,
+ ALTER_MAPPING,
+ DROP_MAPPING,
// ==== Hive command operation types starts here ==== //
SET,
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index b74b460..dc04b45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -319,6 +319,12 @@ public class Operation2Privilege {
adminPrivOps.add(HiveOperationType.CREATE_TRIGGER);
adminPrivOps.add(HiveOperationType.ALTER_TRIGGER);
adminPrivOps.add(HiveOperationType.DROP_TRIGGER);
+ adminPrivOps.add(HiveOperationType.CREATE_POOL);
+ adminPrivOps.add(HiveOperationType.ALTER_POOL);
+ adminPrivOps.add(HiveOperationType.DROP_POOL);
+ adminPrivOps.add(HiveOperationType.CREATE_MAPPING);
+ adminPrivOps.add(HiveOperationType.ALTER_MAPPING);
+ adminPrivOps.add(HiveOperationType.DROP_MAPPING);
// operations require select priv
op2Priv.put(HiveOperationType.SHOWCOLUMNS, PrivRequirement.newIOPrivRequirement
@@ -485,6 +491,12 @@ public class Operation2Privilege {
op2Priv.put(HiveOperationType.CREATE_TRIGGER, PrivRequirement.newIOPrivRequirement(null, null));
op2Priv.put(HiveOperationType.ALTER_TRIGGER, PrivRequirement.newIOPrivRequirement(null, null));
op2Priv.put(HiveOperationType.DROP_TRIGGER, PrivRequirement.newIOPrivRequirement(null, null));
+ op2Priv.put(HiveOperationType.CREATE_POOL, PrivRequirement.newIOPrivRequirement(null, null));
+ op2Priv.put(HiveOperationType.ALTER_POOL, PrivRequirement.newIOPrivRequirement(null, null));
+ op2Priv.put(HiveOperationType.DROP_POOL, PrivRequirement.newIOPrivRequirement(null, null));
+ op2Priv.put(HiveOperationType.CREATE_MAPPING, PrivRequirement.newIOPrivRequirement(null, null));
+ op2Priv.put(HiveOperationType.ALTER_MAPPING, PrivRequirement.newIOPrivRequirement(null, null));
+ op2Priv.put(HiveOperationType.DROP_MAPPING, PrivRequirement.newIOPrivRequirement(null, null));
}
/**
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
index 156da4c..4cb9172 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
@@ -136,7 +136,7 @@ public class TestWorkloadManager {
public static WMMapping mapping(String type, String user, String pool, int ordering) {
WMMapping mapping = new WMMapping("rp", type, user);
- mapping.setPoolName(pool);
+ mapping.setPoolPath(pool);
mapping.setOrdering(ordering);
return mapping;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/test/queries/clientpositive/resourceplan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/resourceplan.q b/ql/src/test/queries/clientpositive/resourceplan.q
index a094712..afb9ceb 100644
--- a/ql/src/test/queries/clientpositive/resourceplan.q
+++ b/ql/src/test/queries/clientpositive/resourceplan.q
@@ -25,11 +25,14 @@ SHOW RESOURCE PLAN plan_1;
SELECT * FROM SYS.WM_RESOURCEPLANS;
-- Create and show plan_2.
-CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10;
+CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=10;
SHOW RESOURCE PLANS;
SHOW RESOURCE PLAN plan_2;
SELECT * FROM SYS.WM_RESOURCEPLANS;
+-- Should fail cannot set pool in create.
+CREATE RESOURCE PLAN plan_3 WITH QUERY_PARALLELISM=5, DEFAULT POOL = `all`;
+
--
-- Rename resource plans.
--
@@ -47,7 +50,7 @@ ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 20;
SELECT * FROM SYS.WM_RESOURCEPLANS;
-- Will fail for now; there are no pools.
-ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = 'default';
+ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1;
SELECT * FROM SYS.WM_RESOURCEPLANS;
--
@@ -109,6 +112,10 @@ DROP RESOURCE PLAN plan_2;
DROP RESOURCE PLAN plan_3;
SELECT * FROM SYS.WM_RESOURCEPLANS;
+-- Use reserved keyword table as name.
+CREATE RESOURCE PLAN `table`;
+ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1;
+SELECT * FROM SYS.WM_RESOURCEPLANS;
--
-- Create trigger commands.
@@ -134,6 +141,16 @@ SELECT * FROM SYS.WM_TRIGGERS;
-- No edit on active resource plan.
CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0m DO MOVE TO null_pool;
+-- Add trigger with reserved keywords.
+CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100K DO MOVE TO `table`;
+CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100K DO MOVE TO `default`;
+CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1M DO MOVE TO `default`;
+CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL;
+CREATE TRIGGER `table`.`trigger2` WHEN BYTES_READ > 100 DO KILL;
+SELECT * FROM SYS.WM_TRIGGERS;
+DROP TRIGGER `table`.`database`;
+SELECT * FROM SYS.WM_TRIGGERS;
+
-- Cannot drop/change trigger from enabled plan.
ALTER RESOURCE PLAN plan_1 ENABLE;
SELECT * FROM SYS.WM_RESOURCEPLANS;
@@ -150,3 +167,136 @@ ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ = 1000K DO KILL;
ALTER RESOURCE PLAN plan_2 DISABLE;
CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0 DO MOVE TO null_pool;
SELECT * FROM SYS.WM_TRIGGERS;
+
+
+--
+-- Create pool command.
+--
+
+-- Cannot create pool in active plans.
+CREATE POOL plan_1.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default';
+
+CREATE POOL plan_2.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default';
+SELECT * FROM SYS.WM_POOLS;
+
+CREATE POOL plan_2.default.c1 WITH
+ ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='priority';
+
+CREATE POOL plan_2.default.c2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7;
+
+ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.2;
+
+ALTER POOL plan_2.default SET path = def;
+SELECT * FROM SYS.WM_POOLS;
+
+DROP POOL plan_2.default;
+SELECT * FROM SYS.WM_POOLS;
+
+-- Create failed no parent pool found.
+CREATE POOL plan_2.child1.child2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fcfs', ALLOC_FRACTION=0.8;
+
+-- Create nested pools.
+CREATE POOL `table`.`table` WITH
+ SCHEDULING_POLICY='random', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1;
+
+CREATE POOL `table`.`table`.pool1 WITH
+ SCHEDULING_POLICY='priority', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9;
+CREATE POOL `table`.`table`.pool1.child1 WITH
+ SCHEDULING_POLICY='random', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3;
+CREATE POOL `table`.`table`.pool1.child2 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7;
+ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0;
+SELECT * FROM SYS.WM_POOLS;
+
+-- Rename with child pools and parent pool.
+ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool;
+SELECT * FROM SYS.WM_POOLS;
+
+-- Fails has child pools.
+DROP POOL `table`.`table`;
+SELECT * FROM SYS.WM_POOLS;
+
+-- Fails default is default pool :-).
+DROP POOL `table`.default;
+SELECT * FROM SYS.WM_POOLS;
+SELECT * FROM SYS.WM_RESOURCEPLANS;
+
+-- Changed default pool, now it should work.
+ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool;
+DROP POOL `table`.default;
+SELECT * FROM SYS.WM_POOLS;
+
+--
+-- Pool to trigger mappings.
+--
+
+-- Success.
+ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1;
+ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1;
+
+-- With keywords, hopefully nobody does this.
+ALTER POOL `table`.`table` ADD TRIGGER `table`;
+
+-- Test m:n mappings.
+ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table`;
+ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1`;
+ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger1`;
+ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2`;
+SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS;
+
+-- Failures.
+
+
+-- pool does not exist.
+ALTER POOL plan_2.default ADD TRIGGER trigger_1;
+
+-- Trigger does not exist.
+ALTER POOL plan_2.def ADD TRIGGER trigger_2;
+
+SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS;
+
+-- Drop success.
+ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1;
+
+-- Drop fail, does not exist.
+ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2;
+
+-- Drops related mappings too.
+DROP POOL `table`.`table`.pool.child1;
+DROP POOL `table`.`table`.pool.child2;
+
+SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS;
+
+
+--
+-- User and group mappings.
+--
+
+CREATE USER MAPPING "user1" IN plan_2 TO def;
+CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1;
+CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1;
+CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1;
+SELECT * FROM SYS.WM_MAPPINGS;
+
+-- Drop pool failed, pool in use.
+DROP POOL plan_2.def.c1;
+
+DROP USER MAPPING "user2" in plan_2;
+DROP GROUP MAPPING "group2" in plan_2;
+SELECT * FROM SYS.WM_MAPPINGS;
+
+CREATE RESOURCE PLAN plan_4;
+
+ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE;
+
+DROP RESOURCE PLAN plan_2;
+
+SELECT * FROM SYS.WM_RESOURCEPLANS;
+SELECT * FROM SYS.WM_POOLS;
+SELECT * FROM SYS.WM_TRIGGERS;
+SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS;
+SELECT * FROM SYS.WM_MAPPINGS;
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/test/results/clientpositive/llap/resourceplan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index fe4d77a..b6c2c79 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -2211,6 +2211,154 @@ ON
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: SYS@WM_TRIGGERS
POSTHOOK: Output: database:sys
+PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS` (
+ `RP_NAME` string,
+ `PATH` string,
+ `ALLOC_FRACTION` double,
+ `QUERY_PARALLELISM` int,
+ `SCHEDULING_POLICY` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ WM_POOL.PATH,
+ WM_POOL.ALLOC_FRACTION,
+ WM_POOL.QUERY_PARALLELISM,
+ WM_POOL.SCHEDULING_POLICY
+FROM
+ WM_POOL
+JOIN
+ WM_RESOURCEPLAN
+ON
+ WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: SYS@WM_POOLS
+PREHOOK: Output: database:sys
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS` (
+ `RP_NAME` string,
+ `PATH` string,
+ `ALLOC_FRACTION` double,
+ `QUERY_PARALLELISM` int,
+ `SCHEDULING_POLICY` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ WM_POOL.PATH,
+ WM_POOL.ALLOC_FRACTION,
+ WM_POOL.QUERY_PARALLELISM,
+ WM_POOL.SCHEDULING_POLICY
+FROM
+ WM_POOL
+JOIN
+ WM_RESOURCEPLAN
+ON
+ WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: SYS@WM_POOLS
+POSTHOOK: Output: database:sys
+PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
+ `RP_NAME` string,
+ `POOL_PATH` string,
+ `TRIGGER_NAME` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME RP_NAME,
+ WM_POOL.PATH POOL_PATH,
+ WM_TRIGGER.NAME TRIGGER_NAME
+FROM
+ WM_POOL_TO_TRIGGER
+JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID
+JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID
+JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS
+PREHOOK: Output: database:sys
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
+ `RP_NAME` string,
+ `POOL_PATH` string,
+ `TRIGGER_NAME` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME RP_NAME,
+ WM_POOL.PATH POOL_PATH,
+ WM_TRIGGER.NAME TRIGGER_NAME
+FROM
+ WM_POOL_TO_TRIGGER
+JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID
+JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID
+JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS
+POSTHOOK: Output: database:sys
+PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` (
+ `RP_NAME` string,
+ `ENTITY_TYPE` string,
+ `ENTITY_NAME` string,
+ `POOL_PATH` string,
+ `ORDERING` int
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ ENTITY_TYPE,
+ ENTITY_NAME,
+ WM_POOL.PATH,
+ ORDERING
+FROM
+ WM_MAPPING
+JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID
+LEFT OUTER JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: SYS@WM_MAPPINGS
+PREHOOK: Output: database:sys
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` (
+ `RP_NAME` string,
+ `ENTITY_TYPE` string,
+ `ENTITY_NAME` string,
+ `POOL_PATH` string,
+ `ORDERING` int
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ ENTITY_TYPE,
+ ENTITY_NAME,
+ WM_POOL.PATH,
+ ORDERING
+FROM
+ WM_MAPPING
+JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID
+LEFT OUTER JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: SYS@WM_MAPPINGS
+POSTHOOK: Output: database:sys
PREHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA
PREHOOK: type: DROPDATABASE
POSTHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA
@@ -3000,9 +3148,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 DISABLED NULL default
-PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10
+PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=10
PREHOOK: type: CREATE RESOURCEPLAN
-POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10
+POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=10
POSTHOOK: type: CREATE RESOURCEPLAN
PREHOOK: query: SHOW RESOURCE PLANS
PREHOOK: type: SHOW RESOURCEPLAN
@@ -3025,6 +3173,7 @@ POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 DISABLED NULL default
plan_2 DISABLED 10 default
+FAILED: SemanticException Invalid set in create resource plan: TOK_DEFAULT_POOL
PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2
PREHOOK: type: ALTER RESOURCEPLAN
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: )
@@ -3066,9 +3215,9 @@ POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_3 DISABLED 20 default
plan_2 DISABLED 10 default
-PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = 'default'
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1
PREHOOK: type: ALTER RESOURCEPLAN
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Portion of expression could not be parsed: and resourcePlan == rp)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find pool: default1)
PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
PREHOOK: type: QUERY
PREHOOK: Input: sys@wm_resourceplans
@@ -3246,6 +3395,24 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_2 ACTIVE 10 default
+PREHOOK: query: CREATE RESOURCE PLAN `table`
+PREHOOK: type: CREATE RESOURCEPLAN
+POSTHOOK: query: CREATE RESOURCE PLAN `table`
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1
+PREHOOK: type: ALTER RESOURCEPLAN
+POSTHOOK: query: ALTER RESOURCE PLAN `table` SET QUERY_PARALLELISM = 1
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 ACTIVE 10 default
+table DISABLED 1 default
PREHOOK: query: CREATE RESOURCE PLAN plan_1
PREHOOK: type: CREATE RESOURCEPLAN
POSTHOOK: query: CREATE RESOURCE PLAN plan_1
@@ -3310,6 +3477,57 @@ plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0m DO MOVE TO null_pool
PREHOOK: type: CREATE TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100K DO MOVE TO `table`
+PREHOOK: type: CREATE TRIGGER
+POSTHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100K DO MOVE TO `table`
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100K DO MOVE TO `default`
+PREHOOK: type: CREATE TRIGGER
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100K DO MOVE TO `default`
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1M DO MOVE TO `default`
+PREHOOK: type: CREATE TRIGGER
+POSTHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1M DO MOVE TO `default`
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL
+PREHOOK: type: CREATE TRIGGER
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN BYTES_READ > 100 DO KILL
+PREHOOK: type: CREATE TRIGGER
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN BYTES_READ > 100 DO KILL
+POSTHOOK: type: CREATE TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
+table table BYTES_WRITTEN > 100K MOVE TO table
+table trigger BYTES_WRITTEN > 100K MOVE TO default
+table database BYTES_WRITTEN > 1M MOVE TO default
+table trigger1 ELAPSED_TIME > 10 KILL
+table trigger2 BYTES_READ > 100 KILL
+PREHOOK: query: DROP TRIGGER `table`.`database`
+PREHOOK: type: DROP TRIGGER
+POSTHOOK: query: DROP TRIGGER `table`.`database`
+POSTHOOK: type: DROP TRIGGER
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
+table table BYTES_WRITTEN > 100K MOVE TO table
+table trigger BYTES_WRITTEN > 100K MOVE TO default
+table trigger1 ELAPSED_TIME > 10 KILL
+table trigger2 BYTES_READ > 100 KILL
PREHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE
@@ -3323,6 +3541,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_2 ACTIVE 10 default
+table DISABLED 1 default
plan_1 ENABLED NULL default
PREHOOK: query: DROP TRIGGER plan_1.trigger_2
PREHOOK: type: DROP TRIGGER
@@ -3343,6 +3562,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_2 ENABLED 10 default
+table DISABLED 1 default
plan_1 ACTIVE NULL default
PREHOOK: query: DROP TRIGGER plan_1.trigger_2
PREHOOK: type: DROP TRIGGER
@@ -3366,5 +3586,425 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
+plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
+table table BYTES_WRITTEN > 100K MOVE TO table
+table trigger BYTES_WRITTEN > 100K MOVE TO default
+table trigger1 ELAPSED_TIME > 10 KILL
+table trigger2 BYTES_READ > 100 KILL
plan_2 trigger_1 BYTES_READ = 0 MOVE TO null_pool
+PREHOOK: query: CREATE POOL plan_1.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
+PREHOOK: type: CREATE POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+PREHOOK: query: CREATE POOL plan_2.default WITH
+ ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
+PREHOOK: type: CREATE POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Pool already exists: )
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 default 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+PREHOOK: query: CREATE POOL plan_2.default.c1 WITH
+ ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='priority'
+PREHOOK: type: CREATE POOL
+POSTHOOK: query: CREATE POOL plan_2.default.c1 WITH
+ ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='priority'
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL plan_2.default.c2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7
+PREHOOK: type: CREATE POOL
+POSTHOOK: query: CREATE POOL plan_2.default.c2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.2
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.2
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL plan_2.default SET path = def
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL plan_2.default SET path = def
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+PREHOOK: query: DROP POOL plan_2.default
+PREHOOK: type: DROP POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot delete pool: default)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fcfs', ALLOC_FRACTION=0.8
+PREHOOK: type: CREATE POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist)
+PREHOOK: query: CREATE POOL `table`.`table` WITH
+ SCHEDULING_POLICY='random', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
+PREHOOK: type: CREATE POOL
+POSTHOOK: query: CREATE POOL `table`.`table` WITH
+ SCHEDULING_POLICY='random', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL `table`.`table`.pool1 WITH
+ SCHEDULING_POLICY='priority', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
+PREHOOK: type: CREATE POOL
+POSTHOOK: query: CREATE POOL `table`.`table`.pool1 WITH
+ SCHEDULING_POLICY='priority', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH
+ SCHEDULING_POLICY='random', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
+PREHOOK: type: CREATE POOL
+POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH
+ SCHEDULING_POLICY='random', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7
+PREHOOK: type: CREATE POOL
+POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH
+ SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7
+POSTHOOK: type: CREATE POOL
+PREHOOK: query: ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+table table 0.0 1 random
+table table.pool1 0.9 3 priority
+table table.pool1.child1 0.3 1 random
+table table.pool1.child2 0.7 3 fair
+PREHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+table table 0.0 1 random
+table table.pool 0.9 3 priority
+table table.pool.child1 0.3 1 random
+table table.pool.child2 0.7 3 fair
+PREHOOK: query: DROP POOL `table`.`table`
+PREHOOK: type: DROP POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Pool has children cannot drop.)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+table table 0.0 1 random
+table table.pool 0.9 3 priority
+table table.pool.child1 0.3 1 random
+table table.pool.child2 0.7 3 fair
+PREHOOK: query: DROP POOL `table`.default
+PREHOOK: type: DROP POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Cannot drop default pool of a resource plan)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+table default 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+table table 0.0 1 random
+table table.pool 0.9 3 priority
+table table.pool.child1 0.3 1 random
+table table.pool.child2 0.7 3 fair
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_2 DISABLED 10 def
+table DISABLED 1 default
+plan_1 ACTIVE NULL default
+PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool
+PREHOOK: type: ALTER RESOURCEPLAN
+POSTHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: DROP POOL `table`.default
+PREHOOK: type: DROP POOL
+POSTHOOK: query: DROP POOL `table`.default
+POSTHOOK: type: DROP POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+plan_2 def 1.0 4 NULL
+plan_1 default 1.0 4 NULL
+plan_2 def.c1 0.3 3 priority
+plan_2 def.c2 0.2 2 fair
+table table 0.0 1 random
+table table.pool 0.9 3 priority
+table table.pool.child1 0.3 1 random
+table table.pool.child2 0.7 3 fair
+PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL plan_2.def.c2 ADD TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table` ADD TRIGGER `table`
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table` ADD TRIGGER `table`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table`
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `table`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1`
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child1 ADD TRIGGER `trigger1`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger1`
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger1`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2`
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL `table`.`table`.pool.child2 ADD TRIGGER `trigger2`
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+table table table
+table table.pool.child1 table
+table table.pool.child1 trigger1
+table table.pool.child2 trigger1
+table table.pool.child2 trigger2
+plan_2 def.c1 trigger_1
+plan_2 def.c2 trigger_1
+PREHOOK: query: ALTER POOL plan_2.default ADD TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find pool: default)
+PREHOOK: query: ALTER POOL plan_2.def ADD TRIGGER trigger_2
+PREHOOK: type: ALTER POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+table table table
+table table.pool.child1 table
+table table.pool.child1 trigger1
+table table.pool.child2 trigger1
+table table.pool.child2 trigger2
+plan_2 def.c1 trigger_1
+plan_2 def.c2 trigger_1
+PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1
+PREHOOK: type: ALTER POOL
+POSTHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_1
+POSTHOOK: type: ALTER POOL
+PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2
+PREHOOK: type: ALTER POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
+PREHOOK: query: DROP POOL `table`.`table`.pool.child1
+PREHOOK: type: DROP POOL
+POSTHOOK: query: DROP POOL `table`.`table`.pool.child1
+POSTHOOK: type: DROP POOL
+PREHOOK: query: DROP POOL `table`.`table`.pool.child2
+PREHOOK: type: DROP POOL
+POSTHOOK: query: DROP POOL `table`.`table`.pool.child2
+POSTHOOK: type: DROP POOL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+table table table
+plan_2 def.c2 trigger_1
+PREHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def
+PREHOOK: type: CREATE MAPPING
+POSTHOOK: query: CREATE USER MAPPING "user1" IN plan_2 TO def
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1
+PREHOOK: type: CREATE MAPPING
+POSTHOOK: query: CREATE USER MAPPING 'user2' IN plan_2 TO def WITH ORDER 1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1
+PREHOOK: type: CREATE MAPPING
+POSTHOOK: query: CREATE GROUP MAPPING "group1" IN plan_2 TO def.c1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1
+PREHOOK: type: CREATE MAPPING
+POSTHOOK: query: CREATE GROUP MAPPING 'group2' IN plan_2 TO def.c2 WITH ORDER 1
+POSTHOOK: type: CREATE MAPPING
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+plan_2 USER user1 def 0
+plan_2 USER user2 def 1
+plan_2 GROUP group1 def.c1 0
+plan_2 GROUP group2 def.c2 1
+PREHOOK: query: DROP POOL plan_2.def.c1
+PREHOOK: type: DROP POOL
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Exception thrown flushing changes to datastore)
+PREHOOK: query: DROP USER MAPPING "user2" in plan_2
+PREHOOK: type: DROP MAPPING
+POSTHOOK: query: DROP USER MAPPING "user2" in plan_2
+POSTHOOK: type: DROP MAPPING
+PREHOOK: query: DROP GROUP MAPPING "group2" in plan_2
+PREHOOK: type: DROP MAPPING
+POSTHOOK: query: DROP GROUP MAPPING "group2" in plan_2
+POSTHOOK: type: DROP MAPPING
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+plan_2 USER user1 def 0
+plan_2 GROUP group1 def.c1 0
+PREHOOK: query: CREATE RESOURCE PLAN plan_4
+PREHOOK: type: CREATE RESOURCEPLAN
+POSTHOOK: query: CREATE RESOURCE PLAN plan_4
+POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE
+PREHOOK: type: ALTER RESOURCEPLAN
+POSTHOOK: query: ALTER RESOURCE PLAN plan_4 ENABLE ACTIVATE
+POSTHOOK: type: ALTER RESOURCEPLAN
+PREHOOK: query: DROP RESOURCE PLAN plan_2
+PREHOOK: type: DROP RESOURCEPLAN
+POSTHOOK: query: DROP RESOURCE PLAN plan_2
+POSTHOOK: type: DROP RESOURCEPLAN
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+table DISABLED 1 table.pool
+plan_1 ENABLED NULL default
+plan_4 ACTIVE NULL default
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools
+#### A masked pattern was here ####
+table table 0.0 1 random
+table table.pool 0.9 3 priority
+plan_1 default 1.0 4 NULL
+plan_4 default 1.0 4 NULL
+PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_triggers
+#### A masked pattern was here ####
plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
+table table BYTES_WRITTEN > 100K MOVE TO table
+table trigger BYTES_WRITTEN > 100K MOVE TO default
+table trigger1 ELAPSED_TIME > 10 KILL
+table trigger2 BYTES_READ > 100 KILL
+PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_pools_to_triggers
+#### A masked pattern was here ####
+table table table
+PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_mappings
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/44ef5991/ql/src/test/results/clientpositive/llap/sysdb.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out
index d8ded1d..9681b6f 100644
--- a/ql/src/test/results/clientpositive/llap/sysdb.q.out
+++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out
@@ -2279,6 +2279,154 @@ ON
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: SYS@WM_TRIGGERS
POSTHOOK: Output: database:sys
+PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS` (
+ `RP_NAME` string,
+ `PATH` string,
+ `ALLOC_FRACTION` double,
+ `QUERY_PARALLELISM` int,
+ `SCHEDULING_POLICY` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ WM_POOL.PATH,
+ WM_POOL.ALLOC_FRACTION,
+ WM_POOL.QUERY_PARALLELISM,
+ WM_POOL.SCHEDULING_POLICY
+FROM
+ WM_POOL
+JOIN
+ WM_RESOURCEPLAN
+ON
+ WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: SYS@WM_POOLS
+PREHOOK: Output: database:sys
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS` (
+ `RP_NAME` string,
+ `PATH` string,
+ `ALLOC_FRACTION` double,
+ `QUERY_PARALLELISM` int,
+ `SCHEDULING_POLICY` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ WM_POOL.PATH,
+ WM_POOL.ALLOC_FRACTION,
+ WM_POOL.QUERY_PARALLELISM,
+ WM_POOL.SCHEDULING_POLICY
+FROM
+ WM_POOL
+JOIN
+ WM_RESOURCEPLAN
+ON
+ WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: SYS@WM_POOLS
+POSTHOOK: Output: database:sys
+PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
+ `RP_NAME` string,
+ `POOL_PATH` string,
+ `TRIGGER_NAME` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME RP_NAME,
+ WM_POOL.PATH POOL_PATH,
+ WM_TRIGGER.NAME TRIGGER_NAME
+FROM
+ WM_POOL_TO_TRIGGER
+JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID
+JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID
+JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS
+PREHOOK: Output: database:sys
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
+ `RP_NAME` string,
+ `POOL_PATH` string,
+ `TRIGGER_NAME` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME RP_NAME,
+ WM_POOL.PATH POOL_PATH,
+ WM_TRIGGER.NAME TRIGGER_NAME
+FROM
+ WM_POOL_TO_TRIGGER
+JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID
+JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID
+JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: SYS@WM_POOLS_TO_TRIGGERS
+POSTHOOK: Output: database:sys
+PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` (
+ `RP_NAME` string,
+ `ENTITY_TYPE` string,
+ `ENTITY_NAME` string,
+ `POOL_PATH` string,
+ `ORDERING` int
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ ENTITY_TYPE,
+ ENTITY_NAME,
+ WM_POOL.PATH,
+ ORDERING
+FROM
+ WM_MAPPING
+JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID
+LEFT OUTER JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: SYS@WM_MAPPINGS
+PREHOOK: Output: database:sys
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` (
+ `RP_NAME` string,
+ `ENTITY_TYPE` string,
+ `ENTITY_NAME` string,
+ `POOL_PATH` string,
+ `ORDERING` int
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+ WM_RESOURCEPLAN.NAME,
+ ENTITY_TYPE,
+ ENTITY_NAME,
+ WM_POOL.PATH,
+ ORDERING
+FROM
+ WM_MAPPING
+JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID
+LEFT OUTER JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: SYS@WM_MAPPINGS
+POSTHOOK: Output: database:sys
PREHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA
PREHOOK: type: DROPDATABASE
POSTHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA
@@ -3059,7 +3207,7 @@ POSTHOOK: query: select count(*) from cds
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@cds
#### A masked pattern was here ####
-67
+70
PREHOOK: query: select column_name, type_name, integer_idx from columns_v2 order by column_name, integer_idx limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@columns_v2
@@ -3072,7 +3220,7 @@ a decimal(10,2) 0
action_expression string 3
add_time int 1
aint int 0
-astring string 1
+alloc_fraction double 2
PREHOOK: query: select param_key, param_value from database_params order by param_key, param_value limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@database_params
@@ -3213,7 +3361,7 @@ POSTHOOK: query: select count(*) from sds
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@sds
#### A masked pattern was here ####
-73
+76
PREHOOK: query: select param_key, param_value from sd_params order by param_key, param_value limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@sd_params
@@ -3325,8 +3473,8 @@ POSTHOOK: Input: sys@table_params
COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","b":"true","c":"true","d":"true","e":"true","f":"true","g":"true"}}
COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"action_expression":"true","name":"true","rp_name":"true","trigger_expression":"true"}}
COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"add_time":"true","grant_option":"true","grantor":"true","grantor_type":"true","principal_name":"true","principal_type":"true","role_grant_id":"true","role_id":"true"}}
+COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"alloc_fraction":"true","path":"true","query_parallelism":"true","rp_name":"true","scheduling_policy":"true"}}
COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"avg_col_len":"true","big_decimal_high_value":"true","big_decimal_low_value":"true","column_name":"true","column_type":"true","cs_id":"true","db_name":"true","double_high_value":"true","double_low_value":"true","last_analyzed":"true","long_high_value":"true","long_low_value":"true","max_col_len":"true","num_distincts":"true","num_falses":"true","num_nulls":"true","num_trues":"true","part_id":"true","partition_name":"true","table_name":"true"}}
-COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"avg_col_len":"true","big_decimal_high_value":"true","big_decimal_low_value":"true","column_name":"true","column_type":"true","cs_id":"true","db_name":"true","double_high_value":"true","double_low_value":"true","last_analyzed":"true","long_high_value":"true","long_low_value":"true","max_col_len":"true","num_distincts":"true","num_falses":"true","num_nulls":"true","num_trues":"true","table_name":"true","tbl_id":"true"}}
PREHOOK: query: select tbl_name from tbls order by tbl_name limit 5
PREHOOK: type: QUERY
PREHOOK: Input: sys@tbls
@@ -3439,7 +3587,7 @@ POSTHOOK: Input: sys@table_params
POSTHOOK: Input: sys@table_stats_view
#### A masked pattern was here ####
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 0 0 0 0
-{"BASIC_STATS":"true","COLUMN_STATS":{"action_expression":"true","name":"true","rp_name":"true","trigger_expression":"true"}} 0 0 0 0
+{"BASIC_STATS":"true","COLUMN_STATS":{"entity_name":"true","entity_type":"true","ordering":"true","pool_path":"true","rp_name":"true"}} 0 0 0 0
{"BASIC_STATS":"true","COLUMN_STATS":{"next_val":"true","sequence_name":"true"}} 0 0 0 0
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 0 0 0 0
#### A masked pattern was here ####
@@ -3644,6 +3792,9 @@ default sys tbl_col_privs BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default sys tbl_privs BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default sys tbls BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default sys version BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
+default sys wm_mappings BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
+default sys wm_pools BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
+default sys wm_pools_to_triggers BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default sys wm_resourceplans BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
default sys wm_triggers BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL
PREHOOK: query: select * from TABLE_PRIVILEGES order by GRANTOR, GRANTEE, TABLE_SCHEMA, TABLE_NAME, PRIVILEGE_TYPE limit 10