Posted to commits@hive.apache.org by jc...@apache.org on 2018/02/16 11:24:30 UTC
[1/5] hive git commit: HIVE-18387: Minimize time that REBUILD locks the materialized view (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Repository: hive
Updated Branches:
refs/heads/master 01f34e49b -> 9e27ad08f
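
For context, this patch adds a new update_creation_metadata call to the metastore Thrift service so that a materialized view REBUILD can refresh the view's creation metadata (the tables it reads plus the transaction snapshot it was built against) in a short, dedicated operation instead of going through a full alter_table. A minimal sketch of the intended Java client usage follows; it assumes a reachable metastore, and the database/view names, snapshot literal, and CreationMetadata setters are illustrative assumptions, not part of this commit:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;

    public class UpdateCreationMetadataSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // metastore URIs assumed configured
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
          Set<String> tablesUsed = new HashSet<>();
          tablesUsed.add("default.src"); // fully qualified source tables of the view
          CreationMetadata cm = new CreationMetadata();
          cm.setDbName("default");
          cm.setTblName("mv1");
          cm.setTablesUsed(tablesUsed);
          // Transaction snapshot the rebuild ran against, in
          // ValidTxnList.writeToString() form (illustrative literal).
          cm.setValidTxnList("5:9223372036854775807::");
          // The new call: updates only the view's creation metadata.
          client.updateCreationMetadata("default", "mv1", cm);
        } finally {
          client.close();
        }
      }
    }
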
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 9382c60..f6fc346 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -297,6 +297,15 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
public function get_materialization_invalidation_info($dbname, array $tbl_names);
/**
* @param string $dbname
+ * @param string $tbl_name
+ * @param \metastore\CreationMetadata $creation_metadata
+ * @throws \metastore\MetaException
+ * @throws \metastore\InvalidOperationException
+ * @throws \metastore\UnknownDBException
+ */
+ public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata);
+ /**
+ * @param string $dbname
* @param string $filter
* @param int $max_tables
* @return string[]
@@ -3502,6 +3511,65 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
throw new \Exception("get_materialization_invalidation_info failed: unknown result");
}
+ public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata)
+ {
+ $this->send_update_creation_metadata($dbname, $tbl_name, $creation_metadata);
+ $this->recv_update_creation_metadata();
+ }
+
+ public function send_update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata)
+ {
+ $args = new \metastore\ThriftHiveMetastore_update_creation_metadata_args();
+ $args->dbname = $dbname;
+ $args->tbl_name = $tbl_name;
+ $args->creation_metadata = $creation_metadata;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'update_creation_metadata', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('update_creation_metadata', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_update_creation_metadata()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_update_creation_metadata_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new \metastore\ThriftHiveMetastore_update_creation_metadata_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ return;
+ }
+
public function get_table_names_by_filter($dbname, $filter, $max_tables)
{
$this->send_get_table_names_by_filter($dbname, $filter, $max_tables);
@@ -20209,6 +20277,259 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
}
+class ThriftHiveMetastore_update_creation_metadata_args {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $dbname = null;
+ /**
+ * @var string
+ */
+ public $tbl_name = null;
+ /**
+ * @var \metastore\CreationMetadata
+ */
+ public $creation_metadata = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'dbname',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tbl_name',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'creation_metadata',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\CreationMetadata',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['dbname'])) {
+ $this->dbname = $vals['dbname'];
+ }
+ if (isset($vals['tbl_name'])) {
+ $this->tbl_name = $vals['tbl_name'];
+ }
+ if (isset($vals['creation_metadata'])) {
+ $this->creation_metadata = $vals['creation_metadata'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_update_creation_metadata_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->dbname);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tbl_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->creation_metadata = new \metastore\CreationMetadata();
+ $xfer += $this->creation_metadata->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_creation_metadata_args');
+ if ($this->dbname !== null) {
+ $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1);
+ $xfer += $output->writeString($this->dbname);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tbl_name !== null) {
+ $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
+ $xfer += $output->writeString($this->tbl_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->creation_metadata !== null) {
+ if (!is_object($this->creation_metadata)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 3);
+ $xfer += $this->creation_metadata->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_update_creation_metadata_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\InvalidOperationException
+ */
+ public $o2 = null;
+ /**
+ * @var \metastore\UnknownDBException
+ */
+ public $o3 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\InvalidOperationException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\UnknownDBException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_update_creation_metadata_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\MetaException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new \metastore\InvalidOperationException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\UnknownDBException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_creation_metadata_result');
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class ThriftHiveMetastore_get_table_names_by_filter_args {
static $_TSPEC;
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index a5b578e..e76eb24 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -20852,14 +20852,14 @@ class Materialization {
static $_TSPEC;
/**
- * @var \metastore\Table
- */
- public $materializationTable = null;
- /**
* @var string[]
*/
public $tablesUsed = null;
/**
+ * @var string
+ */
+ public $validTxnList = null;
+ /**
* @var int
*/
public $invalidationTime = null;
@@ -20868,11 +20868,6 @@ class Materialization {
if (!isset(self::$_TSPEC)) {
self::$_TSPEC = array(
1 => array(
- 'var' => 'materializationTable',
- 'type' => TType::STRUCT,
- 'class' => '\metastore\Table',
- ),
- 2 => array(
'var' => 'tablesUsed',
'type' => TType::SET,
'etype' => TType::STRING,
@@ -20880,6 +20875,10 @@ class Materialization {
'type' => TType::STRING,
),
),
+ 2 => array(
+ 'var' => 'validTxnList',
+ 'type' => TType::STRING,
+ ),
3 => array(
'var' => 'invalidationTime',
'type' => TType::I64,
@@ -20887,12 +20886,12 @@ class Materialization {
);
}
if (is_array($vals)) {
- if (isset($vals['materializationTable'])) {
- $this->materializationTable = $vals['materializationTable'];
- }
if (isset($vals['tablesUsed'])) {
$this->tablesUsed = $vals['tablesUsed'];
}
+ if (isset($vals['validTxnList'])) {
+ $this->validTxnList = $vals['validTxnList'];
+ }
if (isset($vals['invalidationTime'])) {
$this->invalidationTime = $vals['invalidationTime'];
}
@@ -20919,14 +20918,6 @@ class Materialization {
switch ($fid)
{
case 1:
- if ($ftype == TType::STRUCT) {
- $this->materializationTable = new \metastore\Table();
- $xfer += $this->materializationTable->read($input);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- case 2:
if ($ftype == TType::SET) {
$this->tablesUsed = array();
$_size660 = 0;
@@ -20947,6 +20938,13 @@ class Materialization {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validTxnList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
case 3:
if ($ftype == TType::I64) {
$xfer += $input->readI64($this->invalidationTime);
@@ -20967,19 +20965,11 @@ class Materialization {
public function write($output) {
$xfer = 0;
$xfer += $output->writeStructBegin('Materialization');
- if ($this->materializationTable !== null) {
- if (!is_object($this->materializationTable)) {
- throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
- }
- $xfer += $output->writeFieldBegin('materializationTable', TType::STRUCT, 1);
- $xfer += $this->materializationTable->write($output);
- $xfer += $output->writeFieldEnd();
- }
if ($this->tablesUsed !== null) {
if (!is_array($this->tablesUsed)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
}
- $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 2);
+ $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 1);
{
$output->writeSetBegin(TType::STRING, count($this->tablesUsed));
{
@@ -20996,6 +20986,11 @@ class Materialization {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->validTxnList !== null) {
+ $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 2);
+ $xfer += $output->writeString($this->validTxnList);
+ $xfer += $output->writeFieldEnd();
+ }
if ($this->invalidationTime !== null) {
$xfer += $output->writeFieldBegin('invalidationTime', TType::I64, 3);
$xfer += $output->writeI64($this->invalidationTime);
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 9b2aaff..c958e97 100755
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -61,6 +61,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print(' GetTableResult get_table_req(GetTableRequest req)')
print(' GetTablesResult get_table_objects_by_name_req(GetTablesRequest req)')
print(' get_materialization_invalidation_info(string dbname, tbl_names)')
+ print(' void update_creation_metadata(string dbname, string tbl_name, CreationMetadata creation_metadata)')
print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)')
print(' void alter_table(string dbname, string tbl_name, Table new_tbl)')
print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)')
@@ -496,6 +497,12 @@ elif cmd == 'get_materialization_invalidation_info':
sys.exit(1)
pp.pprint(client.get_materialization_invalidation_info(args[0],eval(args[1]),))
+elif cmd == 'update_creation_metadata':
+ if len(args) != 3:
+ print('update_creation_metadata requires 3 args')
+ sys.exit(1)
+ pp.pprint(client.update_creation_metadata(args[0],args[1],eval(args[2]),))
+
elif cmd == 'get_table_names_by_filter':
if len(args) != 3:
print('get_table_names_by_filter requires 3 args')
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 2e19105..330e75f 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -308,6 +308,15 @@ class Iface(fb303.FacebookService.Iface):
"""
pass
+ def update_creation_metadata(self, dbname, tbl_name, creation_metadata):
+ """
+ Parameters:
+ - dbname
+ - tbl_name
+ - creation_metadata
+ """
+ pass
+
def get_table_names_by_filter(self, dbname, filter, max_tables):
"""
Parameters:
@@ -2759,6 +2768,45 @@ class Client(fb303.FacebookService.Client, Iface):
raise result.o3
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result")
+ def update_creation_metadata(self, dbname, tbl_name, creation_metadata):
+ """
+ Parameters:
+ - dbname
+ - tbl_name
+ - creation_metadata
+ """
+ self.send_update_creation_metadata(dbname, tbl_name, creation_metadata)
+ self.recv_update_creation_metadata()
+
+ def send_update_creation_metadata(self, dbname, tbl_name, creation_metadata):
+ self._oprot.writeMessageBegin('update_creation_metadata', TMessageType.CALL, self._seqid)
+ args = update_creation_metadata_args()
+ args.dbname = dbname
+ args.tbl_name = tbl_name
+ args.creation_metadata = creation_metadata
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_update_creation_metadata(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = update_creation_metadata_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ return
+
def get_table_names_by_filter(self, dbname, filter, max_tables):
"""
Parameters:
@@ -7999,6 +8047,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
self._processMap["get_table_req"] = Processor.process_get_table_req
self._processMap["get_table_objects_by_name_req"] = Processor.process_get_table_objects_by_name_req
self._processMap["get_materialization_invalidation_info"] = Processor.process_get_materialization_invalidation_info
+ self._processMap["update_creation_metadata"] = Processor.process_update_creation_metadata
self._processMap["get_table_names_by_filter"] = Processor.process_get_table_names_by_filter
self._processMap["alter_table"] = Processor.process_alter_table
self._processMap["alter_table_with_environment_context"] = Processor.process_alter_table_with_environment_context
@@ -9090,6 +9139,34 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_update_creation_metadata(self, seqid, iprot, oprot):
+ args = update_creation_metadata_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = update_creation_metadata_result()
+ try:
+ self._handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except MetaException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except InvalidOperationException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except UnknownDBException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("update_creation_metadata", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_get_table_names_by_filter(self, seqid, iprot, oprot):
args = get_table_names_by_filter_args()
args.read(iprot)
@@ -18869,6 +18946,192 @@ class get_materialization_invalidation_info_result:
def __ne__(self, other):
return not (self == other)
+class update_creation_metadata_args:
+ """
+ Attributes:
+ - dbname
+ - tbl_name
+ - creation_metadata
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'dbname', None, None, ), # 1
+ (2, TType.STRING, 'tbl_name', None, None, ), # 2
+ (3, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 3
+ )
+
+ def __init__(self, dbname=None, tbl_name=None, creation_metadata=None,):
+ self.dbname = dbname
+ self.tbl_name = tbl_name
+ self.creation_metadata = creation_metadata
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.dbname = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tbl_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.creation_metadata = CreationMetadata()
+ self.creation_metadata.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('update_creation_metadata_args')
+ if self.dbname is not None:
+ oprot.writeFieldBegin('dbname', TType.STRING, 1)
+ oprot.writeString(self.dbname)
+ oprot.writeFieldEnd()
+ if self.tbl_name is not None:
+ oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+ oprot.writeString(self.tbl_name)
+ oprot.writeFieldEnd()
+ if self.creation_metadata is not None:
+ oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 3)
+ self.creation_metadata.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.dbname)
+ value = (value * 31) ^ hash(self.tbl_name)
+ value = (value * 31) ^ hash(self.creation_metadata)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class update_creation_metadata_result:
+ """
+ Attributes:
+ - o1
+ - o2
+ - o3
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3
+ )
+
+ def __init__(self, o1=None, o2=None, o3=None,):
+ self.o1 = o1
+ self.o2 = o2
+ self.o3 = o3
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = InvalidOperationException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.o3 = UnknownDBException()
+ self.o3.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('update_creation_metadata_result')
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 is not None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o3 is not None:
+ oprot.writeFieldBegin('o3', TType.STRUCT, 3)
+ self.o3.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.o1)
+ value = (value * 31) ^ hash(self.o2)
+ value = (value * 31) ^ hash(self.o3)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class get_table_names_by_filter_args:
"""
Attributes:
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 5598859..9a504e1 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -14703,21 +14703,21 @@ class TableMeta:
class Materialization:
"""
Attributes:
- - materializationTable
- tablesUsed
+ - validTxnList
- invalidationTime
"""
thrift_spec = (
None, # 0
- (1, TType.STRUCT, 'materializationTable', (Table, Table.thrift_spec), None, ), # 1
- (2, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 2
+ (1, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 1
+ (2, TType.STRING, 'validTxnList', None, None, ), # 2
(3, TType.I64, 'invalidationTime', None, None, ), # 3
)
- def __init__(self, materializationTable=None, tablesUsed=None, invalidationTime=None,):
- self.materializationTable = materializationTable
+ def __init__(self, tablesUsed=None, validTxnList=None, invalidationTime=None,):
self.tablesUsed = tablesUsed
+ self.validTxnList = validTxnList
self.invalidationTime = invalidationTime
def read(self, iprot):
@@ -14730,12 +14730,6 @@ class Materialization:
if ftype == TType.STOP:
break
if fid == 1:
- if ftype == TType.STRUCT:
- self.materializationTable = Table()
- self.materializationTable.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == 2:
if ftype == TType.SET:
self.tablesUsed = set()
(_etype660, _size657) = iprot.readSetBegin()
@@ -14745,6 +14739,11 @@ class Materialization:
iprot.readSetEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.validTxnList = iprot.readString()
+ else:
+ iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.invalidationTime = iprot.readI64()
@@ -14760,17 +14759,17 @@ class Materialization:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Materialization')
- if self.materializationTable is not None:
- oprot.writeFieldBegin('materializationTable', TType.STRUCT, 1)
- self.materializationTable.write(oprot)
- oprot.writeFieldEnd()
if self.tablesUsed is not None:
- oprot.writeFieldBegin('tablesUsed', TType.SET, 2)
+ oprot.writeFieldBegin('tablesUsed', TType.SET, 1)
oprot.writeSetBegin(TType.STRING, len(self.tablesUsed))
for iter663 in self.tablesUsed:
oprot.writeString(iter663)
oprot.writeSetEnd()
oprot.writeFieldEnd()
+ if self.validTxnList is not None:
+ oprot.writeFieldBegin('validTxnList', TType.STRING, 2)
+ oprot.writeString(self.validTxnList)
+ oprot.writeFieldEnd()
if self.invalidationTime is not None:
oprot.writeFieldBegin('invalidationTime', TType.I64, 3)
oprot.writeI64(self.invalidationTime)
@@ -14779,8 +14778,6 @@ class Materialization:
oprot.writeStructEnd()
def validate(self):
- if self.materializationTable is None:
- raise TProtocol.TProtocolException(message='Required field materializationTable is unset!')
if self.tablesUsed is None:
raise TProtocol.TProtocolException(message='Required field tablesUsed is unset!')
if self.invalidationTime is None:
@@ -14790,8 +14787,8 @@ class Materialization:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.materializationTable)
value = (value * 31) ^ hash(self.tablesUsed)
+ value = (value * 31) ^ hash(self.validTxnList)
value = (value * 31) ^ hash(self.invalidationTime)
return value
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index bc58cfe..5faf5ea 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -3316,20 +3316,19 @@ end
class Materialization
include ::Thrift::Struct, ::Thrift::Struct_Union
- MATERIALIZATIONTABLE = 1
- TABLESUSED = 2
+ TABLESUSED = 1
+ VALIDTXNLIST = 2
INVALIDATIONTIME = 3
FIELDS = {
- MATERIALIZATIONTABLE => {:type => ::Thrift::Types::STRUCT, :name => 'materializationTable', :class => ::Table},
TABLESUSED => {:type => ::Thrift::Types::SET, :name => 'tablesUsed', :element => {:type => ::Thrift::Types::STRING}},
+ VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList', :optional => true},
INVALIDATIONTIME => {:type => ::Thrift::Types::I64, :name => 'invalidationTime'}
}
def struct_fields; FIELDS; end
def validate
- raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field materializationTable is unset!') unless @materializationTable
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablesUsed is unset!') unless @tablesUsed
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field invalidationTime is unset!') unless @invalidationTime
end
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index ec88131..640499e 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -628,6 +628,23 @@ module ThriftHiveMetastore
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_materialization_invalidation_info failed: unknown result')
end
+ def update_creation_metadata(dbname, tbl_name, creation_metadata)
+ send_update_creation_metadata(dbname, tbl_name, creation_metadata)
+ recv_update_creation_metadata()
+ end
+
+ def send_update_creation_metadata(dbname, tbl_name, creation_metadata)
+ send_message('update_creation_metadata', Update_creation_metadata_args, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata)
+ end
+
+ def recv_update_creation_metadata()
+ result = receive_message(Update_creation_metadata_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ return
+ end
+
def get_table_names_by_filter(dbname, filter, max_tables)
send_get_table_names_by_filter(dbname, filter, max_tables)
return recv_get_table_names_by_filter()
@@ -3519,6 +3536,21 @@ module ThriftHiveMetastore
write_result(result, oprot, 'get_materialization_invalidation_info', seqid)
end
+ def process_update_creation_metadata(seqid, iprot, oprot)
+ args = read_args(iprot, Update_creation_metadata_args)
+ result = Update_creation_metadata_result.new()
+ begin
+ @handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata)
+ rescue ::MetaException => o1
+ result.o1 = o1
+ rescue ::InvalidOperationException => o2
+ result.o2 = o2
+ rescue ::UnknownDBException => o3
+ result.o3 = o3
+ end
+ write_result(result, oprot, 'update_creation_metadata', seqid)
+ end
+
def process_get_table_names_by_filter(seqid, iprot, oprot)
args = read_args(iprot, Get_table_names_by_filter_args)
result = Get_table_names_by_filter_result.new()
@@ -6668,6 +6700,46 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Update_creation_metadata_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TBL_NAME = 2
+ CREATION_METADATA = 3
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+ CREATION_METADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creation_metadata', :class => ::CreationMetadata}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Update_creation_metadata_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O2 = 2
+ O3 = 3
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::UnknownDBException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Get_table_names_by_filter_args
include ::Thrift::Struct, ::Thrift::Struct_Union
DBNAME = 1
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 1f99828..47de215 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2518,6 +2518,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
return MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(dbName, tableNames);
}
+ @Override
+ public void update_creation_metadata(final String dbName, final String tableName, CreationMetadata cm) throws MetaException {
+ getMS().updateCreationMetadata(dbName, tableName, cm);
+ }
+
private void assertClientHasCapability(ClientCapabilities client,
ClientCapability value, String what, String call) throws MetaException {
if (!doesClientHaveCapability(client, value)) {
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 23cef8d..5b62114 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1455,6 +1455,13 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
/** {@inheritDoc} */
@Override
+ public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+ throws MetaException, InvalidOperationException, UnknownDBException, TException {
+ client.update_creation_metadata(dbName, tableName, cm);
+ }
+
+ /** {@inheritDoc} */
+ @Override
public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
throws MetaException, TException, InvalidOperationException, UnknownDBException {
return filterHook.filterTableNames(dbName,
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 96d4590..143b04f 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.Database;
@@ -450,6 +451,12 @@ public interface IMetaStoreClient {
throws MetaException, InvalidOperationException, UnknownDBException, TException;
/**
+ * Updates the creation metadata for the materialized view.
+ */
+ void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+ throws MetaException, TException;
+
+ /**
* @param tableName
* @param dbName
* @param partVals
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java
index f787dd4..3d77407 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationInvalidationInfo.java
@@ -35,8 +35,9 @@ public class MaterializationInvalidationInfo extends Materialization {
private AtomicLong invalidationTime;
- public MaterializationInvalidationInfo(Table materializationTable, Set<String> tablesUsed) {
- super(materializationTable, tablesUsed, 0);
+ public MaterializationInvalidationInfo(Set<String> tablesUsed, String validTxnList) {
+ super(tablesUsed, 0);
+ this.setValidTxnList(validTxnList);
this.invalidationTime = new AtomicLong(0);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
index 20e4e8d..92653ae 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hive.metastore;
-import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -115,7 +114,8 @@ public final class MaterializationsInvalidationCache {
try {
for (String dbName : store.getAllDatabases()) {
for (Table mv : store.getTableObjectsByName(dbName, store.getTables(dbName, null, TableType.MATERIALIZED_VIEW))) {
- addMaterializedView(mv, ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), OpType.LOAD);
+ addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()),
+ mv.getCreationMetadata().getValidTxnList(), OpType.LOAD);
}
}
LOG.info("Initialized materializations invalidation cache");
@@ -128,52 +128,60 @@ public final class MaterializationsInvalidationCache {
/**
* Adds a newly created materialized view to the cache.
*
- * @param materializedViewTable the materialized view
+ * @param dbName
+ * @param tableName
* @param tablesUsed tables used by the materialized view
+ * @param validTxnList
*/
- public void createMaterializedView(Table materializedViewTable, Set<String> tablesUsed) {
- addMaterializedView(materializedViewTable, tablesUsed, OpType.CREATE);
+ public void createMaterializedView(String dbName, String tableName, Set<String> tablesUsed,
+ String validTxnList) {
+ addMaterializedView(dbName, tableName, tablesUsed, validTxnList, OpType.CREATE);
}
/**
* Method to call when materialized view is modified.
*
- * @param materializedViewTable the materialized view
+ * @param dbName
+ * @param tableName
* @param tablesUsed tables used by the materialized view
+ * @param validTxnList
*/
- public void alterMaterializedView(Table materializedViewTable, Set<String> tablesUsed) {
- addMaterializedView(materializedViewTable, tablesUsed, OpType.ALTER);
+ public void alterMaterializedView(String dbName, String tableName, Set<String> tablesUsed,
+ String validTxnList) {
+ addMaterializedView(dbName, tableName, tablesUsed, validTxnList, OpType.ALTER);
}
/**
* Adds the materialized view to the cache.
*
- * @param materializedViewTable the materialized view
+ * @param dbName
+ * @param tableName
* @param tablesUsed tables used by the materialized view
+ * @param validTxnList
+ * @param opType
*/
- private void addMaterializedView(Table materializedViewTable, Set<String> tablesUsed, OpType opType) {
+ private void addMaterializedView(String dbName, String tableName, Set<String> tablesUsed,
+ String validTxnList, OpType opType) {
// We are going to create the map for each view in the given database
ConcurrentMap<String, MaterializationInvalidationInfo> cq =
new ConcurrentHashMap<String, MaterializationInvalidationInfo>();
final ConcurrentMap<String, MaterializationInvalidationInfo> prevCq = materializations.putIfAbsent(
- materializedViewTable.getDbName(), cq);
+ dbName, cq);
if (prevCq != null) {
cq = prevCq;
}
// Start the process to add materialization to the cache
// Before loading the materialization in the cache, we need to update some
// important information in the registry to account for rewriting invalidation
- String txnListString = materializedViewTable.getCreationMetadata().getValidTxnList();
- if (txnListString == null) {
+ if (validTxnList == null) {
// This can happen when the materialized view was created on non-transactional tables
return;
}
if (opType == OpType.CREATE || opType == OpType.ALTER) {
// You store the materialized view
- cq.put(materializedViewTable.getTableName(),
- new MaterializationInvalidationInfo(materializedViewTable, tablesUsed));
+ cq.put(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList));
} else {
- ValidTxnList txnList = new ValidReadTxnList(txnListString);
+ ValidTxnList txnList = new ValidReadTxnList(validTxnList);
for (String qNameTableUsed : tablesUsed) {
// First we insert a new tree set to keep table modifications, unless it already exists
ConcurrentSkipListMap<Long, Long> modificationsTree =
@@ -197,19 +205,17 @@ public final class MaterializationsInvalidationCache {
continue;
}
} catch (MetaException ex) {
- LOG.debug("Materialized view " +
- Warehouse.getQualifiedName(materializedViewTable.getDbName(), materializedViewTable.getTableName()) +
+ LOG.debug("Materialized view " + Warehouse.getQualifiedName(dbName, tableName) +
" ignored; error loading view into invalidation cache", ex);
return;
}
}
// For LOAD, you only add it if it does exist as you might be loading an outdated MV
- cq.putIfAbsent(materializedViewTable.getTableName(),
- new MaterializationInvalidationInfo(materializedViewTable, tablesUsed));
+ cq.putIfAbsent(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList));
}
if (LOG.isDebugEnabled()) {
LOG.debug("Cached materialized view for rewriting in invalidation cache: " +
- Warehouse.getQualifiedName(materializedViewTable.getDbName(), materializedViewTable.getTableName()));
+ Warehouse.getQualifiedName(dbName, tableName));
}
}
@@ -236,12 +242,9 @@ public final class MaterializationsInvalidationCache {
/**
* Removes the materialized view from the cache.
*
- * @param materializedViewTable the materialized view to remove
+ * @param dbName
+ * @param tableName
*/
- public void dropMaterializedView(Table materializedViewTable) {
- dropMaterializedView(materializedViewTable.getDbName(), materializedViewTable.getTableName());
- }
-
public void dropMaterializedView(String dbName, String tableName) {
materializations.get(dbName).remove(tableName);
}
@@ -292,7 +295,7 @@ public final class MaterializationsInvalidationCache {
}
private long getInvalidationTime(MaterializationInvalidationInfo materialization) {
- String txnListString = materialization.getMaterializationTable().getCreationMetadata().getValidTxnList();
+ String txnListString = materialization.getValidTxnList();
if (txnListString == null) {
// This can happen when the materialization was created on non-transactional tables
return Long.MIN_VALUE;
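
getInvalidationTime() now reads the transaction snapshot straight from the cached entry's validTxnList string rather than through the full Table object. As a reference point, here is a hedged sketch of what such a serialized snapshot means once parsed with ValidReadTxnList, the same class this cache uses; the literal follows the high-watermark:minOpenTxn:openTxns:abortedTxns layout, with made-up ids:

    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    public class TxnSnapshotSketch {
      public static void main(String[] args) {
        // Serialized form as stored in CreationMetadata.validTxnList:
        // high watermark 10, no open or aborted transactions.
        ValidTxnList snapshot = new ValidReadTxnList("10:9223372036854775807::");
        System.out.println(snapshot.isTxnValid(5L));   // true: at or below the high watermark
        System.out.println(snapshot.isTxnValid(11L));  // false: beyond the snapshot
        System.out.println(snapshot.writeToString());  // round-trips the stored string
      }
    }
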
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index edabaa1..7b44df4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1152,7 +1152,8 @@ public class ObjectStore implements RawStore, Configurable {
if (MetaStoreUtils.isMaterializedViewTable(tbl)) {
// Add to the invalidation cache
MaterializationsInvalidationCache.get().createMaterializedView(
- tbl, tbl.getCreationMetadata().getTablesUsed());
+ tbl.getDbName(), tbl.getTableName(), tbl.getCreationMetadata().getTablesUsed(),
+ tbl.getCreationMetadata().getValidTxnList());
}
}
}
@@ -3738,28 +3739,38 @@ public class ObjectStore implements RawStore, Configurable {
oldt.setViewOriginalText(newt.getViewOriginalText());
oldt.setViewExpandedText(newt.getViewExpandedText());
oldt.setRewriteEnabled(newt.isRewriteEnabled());
- registerCreationSignature = newTable.getCreationMetadata() != null;
- if (registerCreationSignature) {
- // Update creation metadata
- MCreationMetadata newMcm = convertToMCreationMetadata(
- newTable.getCreationMetadata());
- MCreationMetadata mcm = getCreationMetadata(dbname, name);
- mcm.setTables(newMcm.getTables());
- mcm.setTxnList(newMcm.getTxnList());
+
+ // commit the changes
+ success = commitTransaction();
+ } finally {
+ if (!success) {
+ rollbackTransaction();
}
+ }
+ }
+ @Override
+ public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException {
+ boolean success = false;
+ try {
+ openTransaction();
+ dbname = normalizeIdentifier(dbname);
+ tablename = normalizeIdentifier(tablename);
+ // Update creation metadata
+ MCreationMetadata newMcm = convertToMCreationMetadata(cm);
+ MCreationMetadata mcm = getCreationMetadata(dbname, tablename);
+ mcm.setTables(newMcm.getTables());
+ mcm.setTxnList(newMcm.getTxnList());
// commit the changes
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
} else {
- if (MetaStoreUtils.isMaterializedViewTable(newTable) &&
- registerCreationSignature) {
- // Add to the invalidation cache if the creation signature has changed
- MaterializationsInvalidationCache.get().alterMaterializedView(
- newTable, newTable.getCreationMetadata().getTablesUsed());
- }
+ // Add to the invalidation cache if the creation signature has changed
+ MaterializationsInvalidationCache.get().alterMaterializedView(
+ dbname, tablename, cm.getTablesUsed(), cm.getValidTxnList());
}
}
}
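
Taken together, this ObjectStore change is what shrinks the REBUILD critical section: alterTable no longer rewrites the creation metadata inline, and the new updateCreationMetadata method runs in its own short transaction, touching the shared invalidation cache only after a successful commit. A self-contained sketch of that commit-then-refresh shape, using plain stand-in types rather than metastore APIs:

    public class CommitThenRefreshSketch {
      interface Txn { void open(); boolean commit(); void rollback(); }

      static void updateCreationMetadata(Txn txn, Runnable mutateRow, Runnable refreshCache) {
        boolean success = false;
        try {
          txn.open();
          mutateRow.run();        // rewrite only the creation-metadata row
          success = txn.commit();
        } finally {
          if (!success) {
            txn.rollback();       // failure: leave the shared cache untouched
          } else {
            refreshCache.run();   // success: now refresh the invalidation cache
          }
        }
      }

      public static void main(String[] args) {
        Txn txn = new Txn() {
          public void open() {}
          public boolean commit() { return true; }
          public void rollback() {}
        };
        updateCreationMetadata(txn,
            () -> System.out.println("update MCreationMetadata row"),
            () -> System.out.println("alterMaterializedView(...) on the cache"));
      }
    }
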
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index e4e7d42..f500d63 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import java.lang.annotation.ElementType;
@@ -172,6 +173,9 @@ public interface RawStore extends Configurable {
void alterTable(String dbname, String name, Table newTable)
throws InvalidObjectException, MetaException;
+ void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException;
+
List<String> getTables(String dbName, String pattern)
throws MetaException;
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 80aa3bc..0d132f2 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hive.metastore.cache;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import java.nio.ByteBuffer;
@@ -1175,6 +1176,12 @@ public class CachedStore implements RawStore, Configurable {
}
@Override
+ public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException {
+ rawStore.updateCreationMetadata(dbname, tablename, cm);
+ }
+
+ @Override
public List<String> getTables(String dbName, String pattern) throws MetaException {
if (!isBlacklistWhitelistEmpty(conf) || !sharedCacheWrapper.isInitialized()) {
return rawStore.getTables(dbName, pattern);
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 371b975..35fc8b3 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1049,8 +1049,8 @@ struct TableMeta {
}
struct Materialization {
- 1: required Table materializationTable;
- 2: required set<string> tablesUsed;
+ 1: required set<string> tablesUsed;
+ 2: optional string validTxnList
3: required i64 invalidationTime;
}
@@ -1420,6 +1420,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
map<string, Materialization> get_materialization_invalidation_info(1:string dbname, 2:list<string> tbl_names)
throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+ void update_creation_metadata(1:string dbname, 2:string tbl_name, 3:CreationMetadata creation_metadata)
+ throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
// Get a list of table names that match a filter.
// The filter operators are LIKE, <, <=, >, >=, =, <>
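
Two things are worth noting in the IDL change above. First, update_creation_metadata is a plain void method with the same exception triple as its neighbors. Second, Materialization reuses field ids with new types (field 1 goes from the Table struct to set<string> tablesUsed, field 2 becomes the optional validTxnList string), so stubs generated before and after this commit are not wire-compatible for this struct and must be regenerated together. A small sketch against the regenerated Java bean, whose required-fields constructor shape is visible in the MaterializationInvalidationInfo change earlier (super(tablesUsed, 0)); the snapshot literal is illustrative:

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.Materialization;

    public class MaterializationStructSketch {
      public static void main(String[] args) {
        // Required fields only: tablesUsed (field 1) and invalidationTime (field 3).
        Materialization m = new Materialization(Collections.singleton("default.src"), 0L);
        // Field 2 is optional; it stays unset for views over non-transactional tables.
        m.setValidTxnList("5:9223372036854775807::");
        System.out.println(m.getTablesUsed() + " / " + m.getValidTxnList());
      }
    }
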
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 9100c73..75ea8c4 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import java.nio.ByteBuffer;
@@ -241,6 +242,12 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException {
+ objectStore.updateCreationMetadata(dbname, tablename, cm);
+ }
+
+ @Override
public List<String> getTables(String dbName, String pattern) throws MetaException {
return objectStore.getTables(dbName, pattern);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 86e72d8..207d842 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import java.nio.ByteBuffer;
@@ -240,6 +241,11 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException {
+ }
+
+ @Override
public List<String> getTables(String dbName, String pattern) throws MetaException {
return Collections.emptyList();
[2/5] hive git commit: HIVE-18387: Minimize time that REBUILD locks the materialized view (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index d5e3527..05064cb 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -116,6 +116,8 @@ import org.slf4j.LoggerFactory;
public Map<String,Materialization> get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+ public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+
public List<String> get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
@@ -482,6 +484,8 @@ import org.slf4j.LoggerFactory;
public void get_materialization_invalidation_info(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1855,6 +1859,37 @@ import org.slf4j.LoggerFactory;
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result");
}
+ public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
+ {
+ send_update_creation_metadata(dbname, tbl_name, creation_metadata);
+ recv_update_creation_metadata();
+ }
+
+ public void send_update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws org.apache.thrift.TException
+ {
+ update_creation_metadata_args args = new update_creation_metadata_args();
+ args.setDbname(dbname);
+ args.setTbl_name(tbl_name);
+ args.setCreation_metadata(creation_metadata);
+ sendBase("update_creation_metadata", args);
+ }
+
+ public void recv_update_creation_metadata() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
+ {
+ update_creation_metadata_result result = new update_creation_metadata_result();
+ receiveBase(result, "update_creation_metadata");
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ return;
+ }
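
The generated synchronous client follows the usual Thrift split: update_creation_metadata() wraps a send_/recv_ pair, and recv_update_creation_metadata() rethrows whichever of the three declared exceptions (o1, o2, o3) the server populated. A minimal caller sketch, assuming a metastore listening on the default port; host, port, and field values are placeholders:

    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.hadoop.hive.metastore.api.UnknownDBException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class UpdateCreationMetadataExample {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint for a running metastore.
        TSocket transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        CreationMetadata cm = new CreationMetadata(); // required fields depend on the IDL
        try {
          client.update_creation_metadata("default", "my_mv", cm);
        } catch (MetaException | InvalidOperationException | UnknownDBException e) {
          // The three declared exceptions surface here, exactly as mapped in
          // recv_update_creation_metadata() above.
        } finally {
          transport.close();
        }
      }
    }
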
+
public List<String> get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
{
send_get_table_names_by_filter(dbname, filter, max_tables);
@@ -7323,6 +7358,44 @@ import org.slf4j.LoggerFactory;
}
}
+ public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ update_creation_metadata_call method_call = new update_creation_metadata_call(dbname, tbl_name, creation_metadata, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private String dbname;
+ private String tbl_name;
+ private CreationMetadata creation_metadata;
+ public update_creation_metadata_call(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.dbname = dbname;
+ this.tbl_name = tbl_name;
+ this.creation_metadata = creation_metadata;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_creation_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ update_creation_metadata_args args = new update_creation_metadata_args();
+ args.setDbname(dbname);
+ args.setTbl_name(tbl_name);
+ args.setCreation_metadata(creation_metadata);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public void getResult() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ (new Client(prot)).recv_update_creation_metadata();
+ }
+ }
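
On the async side, each call becomes a TAsyncMethodCall subclass: write_args() serializes the request, and getResult() replays the buffered response frame through a synchronous Client to reuse the same exception mapping. A usage sketch under the assumption of the libthrift 0.9.x async API that this generated code targets; the endpoint and argument values are placeholders:

    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    public class AsyncUpdateCreationMetadataExample {
      public static void main(String[] args) throws Exception {
        TAsyncClientManager manager = new TAsyncClientManager();
        TNonblockingSocket socket = new TNonblockingSocket("localhost", 9083);
        ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
            new TBinaryProtocol.Factory(), manager, socket);
        client.update_creation_metadata("default", "my_mv", new CreationMetadata(),
            new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.update_creation_metadata_call>() {
              @Override
              public void onComplete(
                  ThriftHiveMetastore.AsyncClient.update_creation_metadata_call call) {
                try {
                  call.getResult(); // void on success; rethrows o1/o2/o3 on failure
                } catch (Exception e) {
                  e.printStackTrace();
                }
              }
              @Override
              public void onError(Exception e) {
                e.printStackTrace();
              }
            });
      }
    }
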
+
public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
get_table_names_by_filter_call method_call = new get_table_names_by_filter_call(dbname, filter, max_tables, resultHandler, this, ___protocolFactory, ___transport);
@@ -12395,6 +12468,7 @@ import org.slf4j.LoggerFactory;
processMap.put("get_table_req", new get_table_req());
processMap.put("get_table_objects_by_name_req", new get_table_objects_by_name_req());
processMap.put("get_materialization_invalidation_info", new get_materialization_invalidation_info());
+ processMap.put("update_creation_metadata", new update_creation_metadata());
processMap.put("get_table_names_by_filter", new get_table_names_by_filter());
processMap.put("alter_table", new alter_table());
processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
@@ -13508,6 +13582,34 @@ import org.slf4j.LoggerFactory;
}
}
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata<I extends Iface> extends org.apache.thrift.ProcessFunction<I, update_creation_metadata_args> {
+ public update_creation_metadata() {
+ super("update_creation_metadata");
+ }
+
+ public update_creation_metadata_args getEmptyArgsInstance() {
+ return new update_creation_metadata_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public update_creation_metadata_result getResult(I iface, update_creation_metadata_args args) throws org.apache.thrift.TException {
+ update_creation_metadata_result result = new update_creation_metadata_result();
+ try {
+ iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata);
+ } catch (MetaException o1) {
+ result.o1 = o1;
+ } catch (InvalidOperationException o2) {
+ result.o2 = o2;
+ } catch (UnknownDBException o3) {
+ result.o3 = o3;
+ }
+ return result;
+ }
+ }
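
The server-side ProcessFunction completes the loop: getEmptyArgsInstance() deserializes the request, isOneway() returns false so a reply frame is always written, and getResult() converts each declared exception thrown by the handler into the matching result field. A hypothetical handler fragment showing how an Iface implementation might service the call by forwarding to the store layer, much as the test doubles earlier in this patch do (the class below is illustrative, not part of the patch):

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.UnknownDBException;

    // Illustrative handler: validation (e.g. that the table exists and is a
    // materialized view) is elided; a real handler would raise
    // InvalidOperationException/UnknownDBException from those checks.
    class CreationMetadataHandler {
      private final RawStore store;

      CreationMetadataHandler(RawStore store) {
        this.store = store;
      }

      public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata cm)
          throws MetaException, InvalidOperationException, UnknownDBException {
        store.updateCreationMetadata(dbname, tbl_name, cm);
      }
    }
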
+
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_table_names_by_filter_args> {
public get_table_names_by_filter() {
super("get_table_names_by_filter");
@@ -17236,6 +17338,7 @@ import org.slf4j.LoggerFactory;
processMap.put("get_table_req", new get_table_req());
processMap.put("get_table_objects_by_name_req", new get_table_objects_by_name_req());
processMap.put("get_materialization_invalidation_info", new get_materialization_invalidation_info());
+ processMap.put("update_creation_metadata", new update_creation_metadata());
processMap.put("get_table_names_by_filter", new get_table_names_by_filter());
processMap.put("alter_table", new alter_table());
processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
@@ -19673,21 +19776,20 @@ import org.slf4j.LoggerFactory;
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
- public get_table_names_by_filter() {
- super("get_table_names_by_filter");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, update_creation_metadata_args, Void> {
+ public update_creation_metadata() {
+ super("update_creation_metadata");
}
- public get_table_names_by_filter_args getEmptyArgsInstance() {
- return new get_table_names_by_filter_args();
+ public update_creation_metadata_args getEmptyArgsInstance() {
+ return new update_creation_metadata_args();
}
- public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<List<String>>() {
- public void onComplete(List<String> o) {
- get_table_names_by_filter_result result = new get_table_names_by_filter_result();
- result.success = o;
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ update_creation_metadata_result result = new update_creation_metadata_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -19699,7 +19801,7 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+ update_creation_metadata_result result = new update_creation_metadata_result();
if (e instanceof MetaException) {
result.o1 = (MetaException) e;
result.setO1IsSet(true);
@@ -19735,208 +19837,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
- iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
+ public void start(I iface, update_creation_metadata_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
- public alter_table() {
- super("alter_table");
- }
-
- public alter_table_args getEmptyArgsInstance() {
- return new alter_table_args();
- }
-
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
- alter_table_result result = new alter_table_result();
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- alter_table_result result = new alter_table_result();
- if (e instanceof InvalidOperationException) {
- result.o1 = (InvalidOperationException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
- result.setO2IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
- }
- }
-
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
- public alter_table_with_environment_context() {
- super("alter_table_with_environment_context");
- }
-
- public alter_table_with_environment_context_args getEmptyArgsInstance() {
- return new alter_table_with_environment_context_args();
- }
-
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
- alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
- if (e instanceof InvalidOperationException) {
- result.o1 = (InvalidOperationException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
- result.setO2IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
- }
- }
-
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
- public alter_table_with_cascade() {
- super("alter_table_with_cascade");
- }
-
- public alter_table_with_cascade_args getEmptyArgsInstance() {
- return new alter_table_with_cascade_args();
- }
-
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
- alter_table_with_cascade_result result = new alter_table_with_cascade_result();
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- alter_table_with_cascade_result result = new alter_table_with_cascade_result();
- if (e instanceof InvalidOperationException) {
- result.o1 = (InvalidOperationException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
- result.setO2IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
- }
- }
-
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
- public add_partition() {
- super("add_partition");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
+ public get_table_names_by_filter() {
+ super("get_table_names_by_filter");
}
- public add_partition_args getEmptyArgsInstance() {
- return new add_partition_args();
+ public get_table_names_by_filter_args getEmptyArgsInstance() {
+ return new get_table_names_by_filter_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- add_partition_result result = new add_partition_result();
+ return new AsyncMethodCallback<List<String>>() {
+ public void onComplete(List<String> o) {
+ get_table_names_by_filter_result result = new get_table_names_by_filter_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -19949,19 +19868,19 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partition_result result = new add_partition_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+ if (e instanceof MetaException) {
+ result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
+ else if (e instanceof InvalidOperationException) {
+ result.o2 = (InvalidOperationException) e;
result.setO2IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
+ else if (e instanceof UnknownDBException) {
+ result.o3 = (UnknownDBException) e;
result.setO3IsSet(true);
msg = result;
}
@@ -19985,26 +19904,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.add_partition(args.new_part,resultHandler);
+ public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+ iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
- public add_partition_with_environment_context() {
- super("add_partition_with_environment_context");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
+ public alter_table() {
+ super("alter_table");
}
- public add_partition_with_environment_context_args getEmptyArgsInstance() {
- return new add_partition_with_environment_context_args();
+ public alter_table_args getEmptyArgsInstance() {
+ return new alter_table_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
- result.success = o;
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ alter_table_result result = new alter_table_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -20016,20 +19934,15 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ alter_table_result result = new alter_table_result();
+ if (e instanceof InvalidOperationException) {
+ result.o1 = (InvalidOperationException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
- result.setO3IsSet(true);
+ result.o2 = (MetaException) e;
+ result.setO2IsSet(true);
msg = result;
}
else
@@ -20052,27 +19965,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
+ public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
- public add_partitions() {
- super("add_partitions");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
+ public alter_table_with_environment_context() {
+ super("alter_table_with_environment_context");
}
- public add_partitions_args getEmptyArgsInstance() {
- return new add_partitions_args();
+ public alter_table_with_environment_context_args getEmptyArgsInstance() {
+ return new alter_table_with_environment_context_args();
}
- public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Integer>() {
- public void onComplete(Integer o) {
- add_partitions_result result = new add_partitions_result();
- result.success = o;
- result.setSuccessIsSet(true);
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -20084,20 +19995,15 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partitions_result result = new add_partitions_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
+ if (e instanceof InvalidOperationException) {
+ result.o1 = (InvalidOperationException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
- result.setO3IsSet(true);
+ result.o2 = (MetaException) e;
+ result.setO2IsSet(true);
msg = result;
}
else
@@ -20120,27 +20026,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
- iface.add_partitions(args.new_parts,resultHandler);
+ public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
- public add_partitions_pspec() {
- super("add_partitions_pspec");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
+ public alter_table_with_cascade() {
+ super("alter_table_with_cascade");
}
- public add_partitions_pspec_args getEmptyArgsInstance() {
- return new add_partitions_pspec_args();
+ public alter_table_with_cascade_args getEmptyArgsInstance() {
+ return new alter_table_with_cascade_args();
}
- public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Integer>() {
- public void onComplete(Integer o) {
- add_partitions_pspec_result result = new add_partitions_pspec_result();
- result.success = o;
- result.setSuccessIsSet(true);
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ alter_table_with_cascade_result result = new alter_table_with_cascade_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -20152,20 +20056,15 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partitions_pspec_result result = new add_partitions_pspec_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ alter_table_with_cascade_result result = new alter_table_with_cascade_result();
+ if (e instanceof InvalidOperationException) {
+ result.o1 = (InvalidOperationException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
- result.setO3IsSet(true);
+ result.o2 = (MetaException) e;
+ result.setO2IsSet(true);
msg = result;
}
else
@@ -20188,25 +20087,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
- iface.add_partitions_pspec(args.new_parts,resultHandler);
+ public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
- public append_partition() {
- super("append_partition");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
+ public add_partition() {
+ super("add_partition");
}
- public append_partition_args getEmptyArgsInstance() {
- return new append_partition_args();
+ public add_partition_args getEmptyArgsInstance() {
+ return new add_partition_args();
}
public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Partition>() {
public void onComplete(Partition o) {
- append_partition_result result = new append_partition_result();
+ add_partition_result result = new add_partition_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20219,7 +20118,7 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_result result = new append_partition_result();
+ add_partition_result result = new add_partition_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -20255,25 +20154,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
+ public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.add_partition(args.new_part,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
- public add_partitions_req() {
- super("add_partitions_req");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
+ public add_partition_with_environment_context() {
+ super("add_partition_with_environment_context");
}
- public add_partitions_req_args getEmptyArgsInstance() {
- return new add_partitions_req_args();
+ public add_partition_with_environment_context_args getEmptyArgsInstance() {
+ return new add_partition_with_environment_context_args();
}
- public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<AddPartitionsResult>() {
- public void onComplete(AddPartitionsResult o) {
- add_partitions_req_result result = new add_partitions_req_result();
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20286,7 +20185,7 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partitions_req_result result = new add_partitions_req_result();
+ add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -20322,26 +20221,27 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
- iface.add_partitions_req(args.request,resultHandler);
+ public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
- public append_partition_with_environment_context() {
- super("append_partition_with_environment_context");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
+ public add_partitions() {
+ super("add_partitions");
}
- public append_partition_with_environment_context_args getEmptyArgsInstance() {
- return new append_partition_with_environment_context_args();
+ public add_partitions_args getEmptyArgsInstance() {
+ return new add_partitions_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ return new AsyncMethodCallback<Integer>() {
+ public void onComplete(Integer o) {
+ add_partitions_result result = new add_partitions_result();
result.success = o;
+ result.setSuccessIsSet(true);
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -20353,7 +20253,7 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ add_partitions_result result = new add_partitions_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -20389,26 +20289,27 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
+ public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+ iface.add_partitions(args.new_parts,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
- public append_partition_by_name() {
- super("append_partition_by_name");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
+ public add_partitions_pspec() {
+ super("add_partitions_pspec");
}
- public append_partition_by_name_args getEmptyArgsInstance() {
- return new append_partition_by_name_args();
+ public add_partitions_pspec_args getEmptyArgsInstance() {
+ return new add_partitions_pspec_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- append_partition_by_name_result result = new append_partition_by_name_result();
+ return new AsyncMethodCallback<Integer>() {
+ public void onComplete(Integer o) {
+ add_partitions_pspec_result result = new add_partitions_pspec_result();
result.success = o;
+ result.setSuccessIsSet(true);
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -20420,7 +20321,7 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_by_name_result result = new append_partition_by_name_result();
+ add_partitions_pspec_result result = new add_partitions_pspec_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -20456,25 +20357,25 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+ public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+ iface.add_partitions_pspec(args.new_parts,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
- public append_partition_by_name_with_environment_context() {
- super("append_partition_by_name_with_environment_context");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
+ public append_partition() {
+ super("append_partition");
}
- public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
- return new append_partition_by_name_with_environment_context_args();
+ public append_partition_args getEmptyArgsInstance() {
+ return new append_partition_args();
}
public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Partition>() {
public void onComplete(Partition o) {
- append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ append_partition_result result = new append_partition_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20487,7 +20388,7 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ append_partition_result result = new append_partition_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -20523,27 +20424,26 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler);
+ public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_partition_args, Boolean> {
- public drop_partition() {
- super("drop_partition");
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
+ public add_partitions_req() {
+ super("add_partitions_req");
}
- public drop_partition_args getEmptyArgsInstance() {
- return new drop_partition_args();
+ public add_partitions_req_args getEmptyArgsInstance() {
+ return new add_partitions_req_args();
}
- public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Boolean>() {
- public void onComplete(Boolean o) {
- drop_partition_result result = new drop_partition_result();
+ return new AsyncMethodCallback<AddPartitionsResult>() {
+ public void onComplete(AddPartitionsResult o) {
+ add_partitions_req_result result = new add_partitions_req_result();
result.success = o;
- result.setSuccessIsSet(true);
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -20555,14 +20455,283 @@ import org.slf4j.LoggerFactory;
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- drop_partition_result result = new drop_partition_result();
- if (e instanceof NoSuchObjectException) {
- result.o1 = (NoSuchObjectException) e;
+ add_partitions_req_result result = new add_partitions_req_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
+ iface.add_partitions_req(args.request,resultHandler);
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
+ public append_partition_with_environment_context() {
+ super("append_partition_with_environment_context");
+ }
+
+ public append_partition_with_environment_context_args getEmptyArgsInstance() {
+ return new append_partition_with_environment_context_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
+ public append_partition_by_name() {
+ super("append_partition_by_name");
+ }
+
+ public append_partition_by_name_args getEmptyArgsInstance() {
+ return new append_partition_by_name_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_by_name_result result = new append_partition_by_name_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_by_name_result result = new append_partition_by_name_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
+ public append_partition_by_name_with_environment_context() {
+ super("append_partition_by_name_with_environment_context");
+ }
+
+ public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
+ return new append_partition_by_name_with_environment_context_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler);
+ }
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_partition_args, Boolean> {
+ public drop_partition() {
+ super("drop_partition");
+ }
+
+ public drop_partition_args getEmptyArgsInstance() {
+ return new drop_partition_args();
+ }
+
+ public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Boolean>() {
+ public void onComplete(Boolean o) {
+ drop_partition_result result = new drop_partition_result();
+ result.success = o;
+ result.setSuccessIsSet(true);
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ drop_partition_result result = new drop_partition_result();
+ if (e instanceof NoSuchObjectException) {
+ result.o1 = (NoSuchObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o2 = (MetaException) e;
result.setO2IsSet(true);
msg = result;
}
@@ -63884,7 +64053,1201 @@ import org.slf4j.LoggerFactory;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetTablesResult.class)));
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetTablesResult.class)));
+ tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_objects_by_name_req_result.class, metaDataMap);
+ }
+
+ public get_table_objects_by_name_req_result() {
+ }
+
+ public get_table_objects_by_name_req_result(
+ GetTablesResult success,
+ MetaException o1,
+ InvalidOperationException o2,
+ UnknownDBException o3)
+ {
+ this();
+ this.success = success;
+ this.o1 = o1;
+ this.o2 = o2;
+ this.o3 = o3;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public get_table_objects_by_name_req_result(get_table_objects_by_name_req_result other) {
+ if (other.isSetSuccess()) {
+ this.success = new GetTablesResult(other.success);
+ }
+ if (other.isSetO1()) {
+ this.o1 = new MetaException(other.o1);
+ }
+ if (other.isSetO2()) {
+ this.o2 = new InvalidOperationException(other.o2);
+ }
+ if (other.isSetO3()) {
+ this.o3 = new UnknownDBException(other.o3);
+ }
+ }
+
+ public get_table_objects_by_name_req_result deepCopy() {
+ return new get_table_objects_by_name_req_result(this);
+ }
+
+ @Override
+ public void clear() {
+ this.success = null;
+ this.o1 = null;
+ this.o2 = null;
+ this.o3 = null;
+ }
+
+ public GetTablesResult getSuccess() {
+ return this.success;
+ }
+
+ public void setSuccess(GetTablesResult success) {
+ this.success = success;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ /** Returns true if field success is set (has been assigned a value) and false otherwise */
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public void setSuccessIsSet(boolean value) {
+ if (!value) {
+ this.success = null;
+ }
+ }
+
+ public MetaException getO1() {
+ return this.o1;
+ }
+
+ public void setO1(MetaException o1) {
+ this.o1 = o1;
+ }
+
+ public void unsetO1() {
+ this.o1 = null;
+ }
+
+ /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+ public boolean isSetO1() {
+ return this.o1 != null;
+ }
+
+ public void setO1IsSet(boolean value) {
+ if (!value) {
+ this.o1 = null;
+ }
+ }
+
+ public InvalidOperationException getO2() {
+ return this.o2;
+ }
+
+ public void setO2(InvalidOperationException o2) {
+ this.o2 = o2;
+ }
+
+ public void unsetO2() {
+ this.o2 = null;
+ }
+
+ /** Returns true if field o2 is set (has been assigned a value) and false otherwise */
+ public boolean isSetO2() {
+ return this.o2 != null;
+ }
+
+ public void setO2IsSet(boolean value) {
+ if (!value) {
+ this.o2 = null;
+ }
+ }
+
+ public UnknownDBException getO3() {
+ return this.o3;
+ }
+
+ public void setO3(UnknownDBException o3) {
+ this.o3 = o3;
+ }
+
+ public void unsetO3() {
+ this.o3 = null;
+ }
+
+ /** Returns true if field o3 is set (has been assigned a value) and false otherwise */
+ public boolean isSetO3() {
+ return this.o3 != null;
+ }
+
+ public void setO3IsSet(boolean value) {
+ if (!value) {
+ this.o3 = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((GetTablesResult)value);
+ }
+ break;
+
+ case O1:
+ if (value == null) {
+ unsetO1();
+ } else {
+ setO1((MetaException)value);
+ }
+ break;
+
+ case O2:
+ if (value == null) {
+ unsetO2();
+ } else {
+ setO2((InvalidOperationException)value);
+ }
+ break;
+
+ case O3:
+ if (value == null) {
+ unsetO3();
+ } else {
+ setO3((UnknownDBException)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case SUCCESS:
+ return getSuccess();
+
+ case O1:
+ return getO1();
+
+ case O2:
+ return getO2();
+
+ case O3:
+ return getO3();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case SUCCESS:
+ return isSetSuccess();
+ case O1:
+ return isSetO1();
+ case O2:
+ return isSetO2();
+ case O3:
+ return isSetO3();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_table_objects_by_name_req_result)
+ return this.equals((get_table_objects_by_name_req_result)that);
+ return false;
+ }
+
+ public boolean equals(get_table_objects_by_name_req_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_o1 = true && this.isSetO1();
+ boolean that_present_o1 = true && that.isSetO1();
+ if (this_present_o1 || that_present_o1) {
+ if (!(this_present_o1 && that_present_o1))
+ return false;
+ if (!this.o1.equals(that.o1))
+ return false;
+ }
+
+ boolean this_present_o2 = true && this.isSetO2();
+ boolean that_present_o2 = true && that.isSetO2();
+ if (this_present_o2 || that_present_o2) {
+ if (!(this_present_o2 && that_present_o2))
+ return false;
+ if (!this.o2.equals(that.o2))
+ return false;
+ }
+
+ boolean this_present_o3 = true && this.isSetO3();
+ boolean that_present_o3 = true && that.isSetO3();
+ if (this_present_o3 || that_present_o3) {
+ if (!(this_present_o3 && that_present_o3))
+ return false;
+ if (!this.o3.equals(that.o3))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_success = true && (isSetSuccess());
+ list.add(present_success);
+ if (present_success)
+ list.add(success);
+
+ boolean present_o1 = true && (isSetO1());
+ list.add(present_o1);
+ if (present_o1)
+ list.add(o1);
+
+ boolean present_o2 = true && (isSetO2());
+ list.add(present_o2);
+ if (present_o2)
+ list.add(o2);
+
+ boolean present_o3 = true && (isSetO3());
+ list.add(present_o3);
+ if (present_o3)
+ list.add(o3);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(get_table_objects_by_name_req_result other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSuccess()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetO1()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetO2()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetO3()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_table_objects_by_name_req_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o1:");
+ if (this.o1 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o1);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o2:");
+ if (this.o2 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o2);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o3:");
+ if (this.o3 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o3);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (success != null) {
+ success.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class get_table_objects_by_name_req_resultStandardSchemeFactory implements SchemeFactory {
+ public get_table_objects_by_name_req_resultStandardScheme getScheme() {
+ return new get_table_objects_by_name_req_resultStandardScheme();
+ }
+ }
+
+ private static class get_table_objects_by_name_req_resultStandardScheme extends StandardScheme<get_table_objects_by_name_req_result> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 0: // SUCCESS
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.success = new GetTablesResult();
+ struct.success.read(iprot);
+ struct.setSuccessIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 1: // O1
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.o1 = new MetaException();
+ struct.o1.read(iprot);
+ struct.setO1IsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // O2
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.o2 = new InvalidOperationException();
+ struct.o2.read(iprot);
+ struct.setO2IsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 3: // O3
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.o3 = new UnknownDBException();
+ struct.o3.read(iprot);
+ struct.setO3IsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.success != null) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ struct.success.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ if (struct.o1 != null) {
+ oprot.writeFieldBegin(O1_FIELD_DESC);
+ struct.o1.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ if (struct.o2 != null) {
+ oprot.writeFieldBegin(O2_FIELD_DESC);
+ struct.o2.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ if (struct.o3 != null) {
+ oprot.writeFieldBegin(O3_FIELD_DESC);
+ struct.o3.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class get_table_objects_by_name_req_resultTupleSchemeFactory implements SchemeFactory {
+ public get_table_objects_by_name_req_resultTupleScheme getScheme() {
+ return new get_table_objects_by_name_req_resultTupleScheme();
+ }
+ }
+
+ private static class get_table_objects_by_name_req_resultTupleScheme extends TupleScheme<get_table_objects_by_name_req_result> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetSuccess()) {
+ optionals.set(0);
+ }
+ if (struct.isSetO1()) {
+ optionals.set(1);
+ }
+ if (struct.isSetO2()) {
+ optionals.set(2);
+ }
+ if (struct.isSetO3()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
+ if (struct.isSetSuccess()) {
+ struct.success.write(oprot);
+ }
+ if (struct.isSetO1()) {
+ struct.o1.write(oprot);
+ }
+ if (struct.isSetO2()) {
+ struct.o2.write(oprot);
+ }
+ if (struct.isSetO3()) {
+ struct.o3.write(oprot);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(4);
+ if (incoming.get(0)) {
+ struct.success = new GetTablesResult();
+ struct.success.read(iprot);
+ struct.setSuccessIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.o1 = new MetaException();
+ struct.o1.read(iprot);
+ struct.setO1IsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.o2 = new InvalidOperationException();
+ struct.o2.read(iprot);
+ struct.setO2IsSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.o3 = new UnknownDBException();
+ struct.o3.read(iprot);
+ struct.setO3IsSet(true);
+ }
+ }
+ }
+
+ }
+
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_args implements org.apache.thrift.TBase<get_materialization_invalidation_info_args, get_materialization_invalidation_info_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_materialization_invalidation_info_args> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_args");
+
+ private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_names", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new get_materialization_invalidation_info_argsStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new get_materialization_invalidation_info_argsTupleSchemeFactory());
+ }
+
+ private String dbname; // required
+ private List<String> tbl_names; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ DBNAME((short)1, "dbname"),
+ TBL_NAMES((short)2, "tbl_names");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // DBNAME
+ return DBNAME;
+ case 2: // TBL_NAMES
+ return TBL_NAMES;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(
<TRUNCATED>
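For readers skimming the generated code above: each service method gets a companion *_result struct in which at most one field is expected to be set, either success or one of the declared exception fields (o1/o2/o3), and the synchronous client unpacks that struct for the caller. Below is a minimal, illustrative sketch of the caller's side; the GetTablesRequest class and the get_table_objects_by_name_req method name are inferred from the result struct above and the service definition, since the request side is not shown in this part of the diff.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.GetTablesRequest;
import org.apache.hadoop.hive.metastore.api.GetTablesResult;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;

public class GetTablesByNameReqSketch {
  // client is assumed to be an already-connected ThriftHiveMetastore.Iface.
  static GetTablesResult fetch(ThriftHiveMetastore.Iface client)
      throws MetaException, InvalidOperationException, UnknownDBException,
             org.apache.thrift.TException {
    GetTablesRequest req = new GetTablesRequest("default"); // dbName is the only required field
    req.setTblNames(Arrays.asList("cmv_basetable", "cmv_basetable_2"));
    // On the wire the server fills get_table_objects_by_name_req_result:
    // if success is set it is returned here; if o1/o2/o3 is set, the
    // generated client throws that exception instead.
    return client.get_table_objects_by_name_req(req);
  }
}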
[3/5] hive git commit: HIVE-18387: Minimize time that REBUILD locks
the materialized view (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
index b399d66..ccef024 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
@@ -38,8 +38,8 @@ import org.slf4j.LoggerFactory;
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Materialization implements org.apache.thrift.TBase<Materialization, Materialization._Fields>, java.io.Serializable, Cloneable, Comparable<Materialization> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Materialization");
- private static final org.apache.thrift.protocol.TField MATERIALIZATION_TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("materializationTable", org.apache.thrift.protocol.TType.STRUCT, (short)1);
- private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)2);
+ private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)1);
+ private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField INVALIDATION_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("invalidationTime", org.apache.thrift.protocol.TType.I64, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -48,14 +48,14 @@ import org.slf4j.LoggerFactory;
schemes.put(TupleScheme.class, new MaterializationTupleSchemeFactory());
}
- private Table materializationTable; // required
private Set<String> tablesUsed; // required
+ private String validTxnList; // optional
private long invalidationTime; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- MATERIALIZATION_TABLE((short)1, "materializationTable"),
- TABLES_USED((short)2, "tablesUsed"),
+ TABLES_USED((short)1, "tablesUsed"),
+ VALID_TXN_LIST((short)2, "validTxnList"),
INVALIDATION_TIME((short)3, "invalidationTime");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -71,10 +71,10 @@ import org.slf4j.LoggerFactory;
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
- case 1: // MATERIALIZATION_TABLE
- return MATERIALIZATION_TABLE;
- case 2: // TABLES_USED
+ case 1: // TABLES_USED
return TABLES_USED;
+ case 2: // VALID_TXN_LIST
+ return VALID_TXN_LIST;
case 3: // INVALIDATION_TIME
return INVALIDATION_TIME;
default:
@@ -119,14 +119,15 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __INVALIDATIONTIME_ISSET_ID = 0;
private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.MATERIALIZATION_TABLE, new org.apache.thrift.meta_data.FieldMetaData("materializationTable", org.apache.thrift.TFieldRequirementType.REQUIRED,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
tmpMap.put(_Fields.TABLES_USED, new org.apache.thrift.meta_data.FieldMetaData("tablesUsed", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+ tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.INVALIDATION_TIME, new org.apache.thrift.meta_data.FieldMetaData("invalidationTime", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -137,12 +138,10 @@ import org.slf4j.LoggerFactory;
}
public Materialization(
- Table materializationTable,
Set<String> tablesUsed,
long invalidationTime)
{
this();
- this.materializationTable = materializationTable;
this.tablesUsed = tablesUsed;
this.invalidationTime = invalidationTime;
setInvalidationTimeIsSet(true);
@@ -153,13 +152,13 @@ import org.slf4j.LoggerFactory;
*/
public Materialization(Materialization other) {
__isset_bitfield = other.__isset_bitfield;
- if (other.isSetMaterializationTable()) {
- this.materializationTable = new Table(other.materializationTable);
- }
if (other.isSetTablesUsed()) {
Set<String> __this__tablesUsed = new HashSet<String>(other.tablesUsed);
this.tablesUsed = __this__tablesUsed;
}
+ if (other.isSetValidTxnList()) {
+ this.validTxnList = other.validTxnList;
+ }
this.invalidationTime = other.invalidationTime;
}
@@ -169,35 +168,12 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
- this.materializationTable = null;
this.tablesUsed = null;
+ this.validTxnList = null;
setInvalidationTimeIsSet(false);
this.invalidationTime = 0;
}
- public Table getMaterializationTable() {
- return this.materializationTable;
- }
-
- public void setMaterializationTable(Table materializationTable) {
- this.materializationTable = materializationTable;
- }
-
- public void unsetMaterializationTable() {
- this.materializationTable = null;
- }
-
- /** Returns true if field materializationTable is set (has been assigned a value) and false otherwise */
- public boolean isSetMaterializationTable() {
- return this.materializationTable != null;
- }
-
- public void setMaterializationTableIsSet(boolean value) {
- if (!value) {
- this.materializationTable = null;
- }
- }
-
public int getTablesUsedSize() {
return (this.tablesUsed == null) ? 0 : this.tablesUsed.size();
}
@@ -236,6 +212,29 @@ import org.slf4j.LoggerFactory;
}
}
+ public String getValidTxnList() {
+ return this.validTxnList;
+ }
+
+ public void setValidTxnList(String validTxnList) {
+ this.validTxnList = validTxnList;
+ }
+
+ public void unsetValidTxnList() {
+ this.validTxnList = null;
+ }
+
+ /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidTxnList() {
+ return this.validTxnList != null;
+ }
+
+ public void setValidTxnListIsSet(boolean value) {
+ if (!value) {
+ this.validTxnList = null;
+ }
+ }
+
public long getInvalidationTime() {
return this.invalidationTime;
}
@@ -260,19 +259,19 @@ import org.slf4j.LoggerFactory;
public void setFieldValue(_Fields field, Object value) {
switch (field) {
- case MATERIALIZATION_TABLE:
+ case TABLES_USED:
if (value == null) {
- unsetMaterializationTable();
+ unsetTablesUsed();
} else {
- setMaterializationTable((Table)value);
+ setTablesUsed((Set<String>)value);
}
break;
- case TABLES_USED:
+ case VALID_TXN_LIST:
if (value == null) {
- unsetTablesUsed();
+ unsetValidTxnList();
} else {
- setTablesUsed((Set<String>)value);
+ setValidTxnList((String)value);
}
break;
@@ -289,12 +288,12 @@ import org.slf4j.LoggerFactory;
public Object getFieldValue(_Fields field) {
switch (field) {
- case MATERIALIZATION_TABLE:
- return getMaterializationTable();
-
case TABLES_USED:
return getTablesUsed();
+ case VALID_TXN_LIST:
+ return getValidTxnList();
+
case INVALIDATION_TIME:
return getInvalidationTime();
@@ -309,10 +308,10 @@ import org.slf4j.LoggerFactory;
}
switch (field) {
- case MATERIALIZATION_TABLE:
- return isSetMaterializationTable();
case TABLES_USED:
return isSetTablesUsed();
+ case VALID_TXN_LIST:
+ return isSetValidTxnList();
case INVALIDATION_TIME:
return isSetInvalidationTime();
}
@@ -332,15 +331,6 @@ import org.slf4j.LoggerFactory;
if (that == null)
return false;
- boolean this_present_materializationTable = true && this.isSetMaterializationTable();
- boolean that_present_materializationTable = true && that.isSetMaterializationTable();
- if (this_present_materializationTable || that_present_materializationTable) {
- if (!(this_present_materializationTable && that_present_materializationTable))
- return false;
- if (!this.materializationTable.equals(that.materializationTable))
- return false;
- }
-
boolean this_present_tablesUsed = true && this.isSetTablesUsed();
boolean that_present_tablesUsed = true && that.isSetTablesUsed();
if (this_present_tablesUsed || that_present_tablesUsed) {
@@ -350,6 +340,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_validTxnList = true && this.isSetValidTxnList();
+ boolean that_present_validTxnList = true && that.isSetValidTxnList();
+ if (this_present_validTxnList || that_present_validTxnList) {
+ if (!(this_present_validTxnList && that_present_validTxnList))
+ return false;
+ if (!this.validTxnList.equals(that.validTxnList))
+ return false;
+ }
+
boolean this_present_invalidationTime = true;
boolean that_present_invalidationTime = true;
if (this_present_invalidationTime || that_present_invalidationTime) {
@@ -366,16 +365,16 @@ import org.slf4j.LoggerFactory;
public int hashCode() {
List<Object> list = new ArrayList<Object>();
- boolean present_materializationTable = true && (isSetMaterializationTable());
- list.add(present_materializationTable);
- if (present_materializationTable)
- list.add(materializationTable);
-
boolean present_tablesUsed = true && (isSetTablesUsed());
list.add(present_tablesUsed);
if (present_tablesUsed)
list.add(tablesUsed);
+ boolean present_validTxnList = true && (isSetValidTxnList());
+ list.add(present_validTxnList);
+ if (present_validTxnList)
+ list.add(validTxnList);
+
boolean present_invalidationTime = true;
list.add(present_invalidationTime);
if (present_invalidationTime)
@@ -392,22 +391,22 @@ import org.slf4j.LoggerFactory;
int lastComparison = 0;
- lastComparison = Boolean.valueOf(isSetMaterializationTable()).compareTo(other.isSetMaterializationTable());
+ lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetMaterializationTable()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.materializationTable, other.materializationTable);
+ if (isSetTablesUsed()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed);
if (lastComparison != 0) {
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed());
+ lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetTablesUsed()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed);
+ if (isSetValidTxnList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
if (lastComparison != 0) {
return lastComparison;
}
@@ -442,14 +441,6 @@ import org.slf4j.LoggerFactory;
StringBuilder sb = new StringBuilder("Materialization(");
boolean first = true;
- sb.append("materializationTable:");
- if (this.materializationTable == null) {
- sb.append("null");
- } else {
- sb.append(this.materializationTable);
- }
- first = false;
- if (!first) sb.append(", ");
sb.append("tablesUsed:");
if (this.tablesUsed == null) {
sb.append("null");
@@ -457,6 +448,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.tablesUsed);
}
first = false;
+ if (isSetValidTxnList()) {
+ if (!first) sb.append(", ");
+ sb.append("validTxnList:");
+ if (this.validTxnList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validTxnList);
+ }
+ first = false;
+ }
if (!first) sb.append(", ");
sb.append("invalidationTime:");
sb.append(this.invalidationTime);
@@ -467,10 +468,6 @@ import org.slf4j.LoggerFactory;
public void validate() throws org.apache.thrift.TException {
// check for required fields
- if (!isSetMaterializationTable()) {
- throw new org.apache.thrift.protocol.TProtocolException("Required field 'materializationTable' is unset! Struct:" + toString());
- }
-
if (!isSetTablesUsed()) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablesUsed' is unset! Struct:" + toString());
}
@@ -480,9 +477,6 @@ import org.slf4j.LoggerFactory;
}
// check for sub-struct validity
- if (materializationTable != null) {
- materializationTable.validate();
- }
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
@@ -521,16 +515,7 @@ import org.slf4j.LoggerFactory;
break;
}
switch (schemeField.id) {
- case 1: // MATERIALIZATION_TABLE
- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
- struct.materializationTable = new Table();
- struct.materializationTable.read(iprot);
- struct.setMaterializationTableIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- case 2: // TABLES_USED
+ case 1: // TABLES_USED
if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
{
org.apache.thrift.protocol.TSet _set746 = iprot.readSetBegin();
@@ -548,6 +533,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // VALID_TXN_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validTxnList = iprot.readString();
+ struct.setValidTxnListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
case 3: // INVALIDATION_TIME
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.invalidationTime = iprot.readI64();
@@ -569,11 +562,6 @@ import org.slf4j.LoggerFactory;
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
- if (struct.materializationTable != null) {
- oprot.writeFieldBegin(MATERIALIZATION_TABLE_FIELD_DESC);
- struct.materializationTable.write(oprot);
- oprot.writeFieldEnd();
- }
if (struct.tablesUsed != null) {
oprot.writeFieldBegin(TABLES_USED_FIELD_DESC);
{
@@ -586,6 +574,13 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.validTxnList != null) {
+ if (struct.isSetValidTxnList()) {
+ oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
+ oprot.writeString(struct.validTxnList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldBegin(INVALIDATION_TIME_FIELD_DESC);
oprot.writeI64(struct.invalidationTime);
oprot.writeFieldEnd();
@@ -606,7 +601,6 @@ import org.slf4j.LoggerFactory;
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
- struct.materializationTable.write(oprot);
{
oprot.writeI32(struct.tablesUsed.size());
for (String _iter750 : struct.tablesUsed)
@@ -615,14 +609,19 @@ import org.slf4j.LoggerFactory;
}
}
oprot.writeI64(struct.invalidationTime);
+ BitSet optionals = new BitSet();
+ if (struct.isSetValidTxnList()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetValidTxnList()) {
+ oprot.writeString(struct.validTxnList);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- struct.materializationTable = new Table();
- struct.materializationTable.read(iprot);
- struct.setMaterializationTableIsSet(true);
{
org.apache.thrift.protocol.TSet _set751 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
struct.tablesUsed = new HashSet<String>(2*_set751.size);
@@ -636,6 +635,11 @@ import org.slf4j.LoggerFactory;
struct.setTablesUsedIsSet(true);
struct.invalidationTime = iprot.readI64();
struct.setInvalidationTimeIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.validTxnList = iprot.readString();
+ struct.setValidTxnListIsSet(true);
+ }
}
}
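The net effect of the Materialization change above, seen from an API consumer: the struct no longer embeds the materialization Table; it carries the set of source tables (now field 1) plus an optional validTxnList string (field 2) recording the transaction snapshot the view contents were built against. A minimal sketch using only the constructor and accessors shown in this diff; the txn-list literal is a made-up example, not a value from this commit.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hive.metastore.api.Materialization;

public class MaterializationSketch {
  public static void main(String[] args) {
    Set<String> tablesUsed = new HashSet<>();
    tablesUsed.add("default.cmv_basetable");
    tablesUsed.add("default.cmv_basetable_2");

    // Required fields only: tablesUsed and invalidationTime.
    Materialization m = new Materialization(tablesUsed, 0L);

    // validTxnList is optional; the string below is a hypothetical
    // serialized ValidTxnList, purely for illustration.
    m.setValidTxnList("5:9223372036854775807::");

    if (m.isSetValidTxnList()) {
      System.out.println("valid txn list: " + m.getValidTxnList());
    }
    System.out.println("invalidation time: " + m.getInvalidationTime());
  }
}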
[4/5] hive git commit: HIVE-18387: Minimize time that REBUILD locks
the materialized view (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
index 20b138c..d8863a2 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
@@ -91,6 +91,7 @@ STAGE DEPENDENCIES:
Stage-2 depends on stages: Stage-1
Stage-4 depends on stages: Stage-2, Stage-0
Stage-3 depends on stages: Stage-4
+ Stage-5 depends on stages: Stage-3
Stage-0 depends on stages: Stage-1
STAGE PLANS:
@@ -201,6 +202,9 @@ STAGE PLANS:
Stats Work
Basic Stats Work:
+ Stage: Stage-5
+ Materialized View Work
+
Stage: Stage-0
Move Operator
files:
@@ -432,24 +436,25 @@ POSTHOOK: Input: default@cmv_basetable_2
1
PREHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
- Stage-4 depends on stages: Stage-2, Stage-0
- Stage-3 depends on stages: Stage-4
- Stage-0 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
@@ -471,7 +476,7 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
- Map 4
+ Map 5
Map Operator Tree:
TableScan
alias: cmv_basetable_2
@@ -528,39 +533,71 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.cmv_mat_view
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:decimal(10,2),max:decimal(10,2),countnulls:bigint,bitvector:binary>)
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-2
Dependency Collection
- Stage: Stage-4
- Create View Operator:
- Create View
- columns: a int, c decimal(10,2)
- name: default.cmv_mat_view
+ Stage: Stage-0
+ Move Operator
+ tables:
replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view
Stage: Stage-3
Stats Work
Basic Stats Work:
+ Column Stats Desc:
+ Columns: a, c
+ Column Types: int, decimal(10,2)
+ Table: default.cmv_mat_view
- Stage: Stage-0
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
+ Stage: Stage-4
+ Materialized View Work
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -748,17 +785,17 @@ POSTHOOK: Input: default@cmv_basetable_2
#### A masked pattern was here ####
1
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
index 20b138c..d8863a2 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
@@ -91,6 +91,7 @@ STAGE DEPENDENCIES:
Stage-2 depends on stages: Stage-1
Stage-4 depends on stages: Stage-2, Stage-0
Stage-3 depends on stages: Stage-4
+ Stage-5 depends on stages: Stage-3
Stage-0 depends on stages: Stage-1
STAGE PLANS:
@@ -201,6 +202,9 @@ STAGE PLANS:
Stats Work
Basic Stats Work:
+ Stage: Stage-5
+ Materialized View Work
+
Stage: Stage-0
Move Operator
files:
@@ -432,24 +436,25 @@ POSTHOOK: Input: default@cmv_basetable_2
1
PREHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
- Stage-4 depends on stages: Stage-2, Stage-0
- Stage-3 depends on stages: Stage-4
- Stage-0 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
@@ -471,7 +476,7 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
- Map 4
+ Map 5
Map Operator Tree:
TableScan
alias: cmv_basetable_2
@@ -528,39 +533,71 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.cmv_mat_view
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:decimal(10,2),max:decimal(10,2),countnulls:bigint,bitvector:binary>)
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-2
Dependency Collection
- Stage: Stage-4
- Create View Operator:
- Create View
- columns: a int, c decimal(10,2)
- name: default.cmv_mat_view
+ Stage: Stage-0
+ Move Operator
+ tables:
replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view
Stage: Stage-3
Stats Work
Basic Stats Work:
+ Column Stats Desc:
+ Columns: a, c
+ Column Types: int, decimal(10,2)
+ Table: default.cmv_mat_view
- Stage: Stage-0
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
+ Stage: Stage-4
+ Materialized View Work
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -748,17 +785,17 @@ POSTHOOK: Input: default@cmv_basetable_2
#### A masked pattern was here ####
1
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
index 0d8d238..29e408c 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
@@ -92,6 +92,7 @@ STAGE DEPENDENCIES:
Stage-0 depends on stages: Stage-2
Stage-5 depends on stages: Stage-0
Stage-3 depends on stages: Stage-5
+ Stage-6 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
@@ -198,6 +199,9 @@ STAGE PLANS:
Stats Work
Basic Stats Work:
+ Stage: Stage-6
+ Materialized View Work
+
PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS
SELECT cmv_basetable.a, cmv_basetable_2.c
FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -433,16 +437,17 @@ POSTHOOK: Input: default@cmv_basetable_2
3
PREHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
- Stage-5 depends on stages: Stage-0
- Stage-3 depends on stages: Stage-5
+ Stage-3 depends on stages: Stage-0, Stage-4
+ Stage-6 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-2
STAGE PLANS:
Stage: Stage-1
@@ -523,36 +528,77 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.cmv_mat_view
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-5
- Create View Operator:
- Create View
- columns: a int, c decimal(10,2)
- name: default.cmv_mat_view
+ tables:
replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view
Stage: Stage-3
Stats Work
Basic Stats Work:
+ Column Stats Desc:
+ Columns: a, c
+ Column Types: int, decimal(10,2)
+ Table: default.cmv_mat_view
+
+ Stage: Stage-6
+ Materialized View Work
+
+ Stage: Stage-4
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:decimal(10,2),max:decimal(10,2),countnulls:bigint,bitvector:binary>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -750,17 +796,17 @@ POSTHOOK: Input: default@cmv_basetable_2
#### A masked pattern was here ####
1
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
index 8ab1517..48c0ecb 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_4.q.out
@@ -92,6 +92,7 @@ STAGE DEPENDENCIES:
Stage-0 depends on stages: Stage-2
Stage-5 depends on stages: Stage-0
Stage-3 depends on stages: Stage-5
+ Stage-6 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
@@ -197,6 +198,9 @@ STAGE PLANS:
Stats Work
Basic Stats Work:
+ Stage: Stage-6
+ Materialized View Work
+
PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view AS
SELECT cmv_basetable.a, cmv_basetable_2.c
FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -217,6 +221,49 @@ POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cmv_mat_view
+POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cmv_mat_view
+# col_name data_type comment
+a int
+c decimal(10,2)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MATERIALIZED_VIEW
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 232
+ totalSize 325
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+
+# View Information
+View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+ WHERE cmv_basetable_2.c > 10.0
+ GROUP BY cmv_basetable.a, cmv_basetable_2.c
+View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+ FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+ WHERE `cmv_basetable_2`.`c` > 10.0
+ GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+View Rewrite Enabled: No
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -389,6 +436,49 @@ POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
POSTHOOK: Input: default@cmv_mat_view
POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cmv_mat_view
+POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cmv_mat_view
+# col_name data_type comment
+a int
+c decimal(10,2)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MATERIALIZED_VIEW
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 232
+ totalSize 325
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+
+# View Information
+View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+ WHERE cmv_basetable_2.c > 10.0
+ GROUP BY cmv_basetable.a, cmv_basetable_2.c
+View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+ FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+ WHERE `cmv_basetable_2`.`c` > 10.0
+ GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+View Rewrite Enabled: Yes
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
@@ -516,16 +606,17 @@ POSTHOOK: Input: default@cmv_basetable_2
3
PREHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
- Stage-5 depends on stages: Stage-0
- Stage-3 depends on stages: Stage-5
+ Stage-3 depends on stages: Stage-0, Stage-4
+ Stage-6 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-2
STAGE PLANS:
Stage: Stage-1
@@ -606,36 +697,120 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.cmv_mat_view
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-5
- Create View Operator:
- Create View
- columns: a int, c decimal(10,2)
- name: default.cmv_mat_view
+ tables:
replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view
Stage: Stage-3
Stats Work
Basic Stats Work:
+ Column Stats Desc:
+ Columns: a, c
+ Column Types: int, decimal(10,2)
+ Table: default.cmv_mat_view
+
+ Stage: Stage-6
+ Materialized View Work
+
+ Stage: Stage-4
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 1056 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:decimal(10,2),max:decimal(10,2),countnulls:bigint,bitvector:binary>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 1088 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
PREHOOK: Input: default@cmv_basetable_2
-PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_mat_view
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view REBUILD
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
POSTHOOK: Input: default@cmv_basetable_2
-POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_mat_view
+POSTHOOK: Lineage: cmv_mat_view.a SIMPLE [(cmv_basetable)cmv_basetable.FieldSchema(name:a, type:int, comment:null), ]
+POSTHOOK: Lineage: cmv_mat_view.c SIMPLE [(cmv_basetable_2)cmv_basetable_2.FieldSchema(name:c, type:decimal(10,2), comment:null), ]
+PREHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cmv_mat_view
+POSTHOOK: query: DESCRIBE FORMATTED cmv_mat_view
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cmv_mat_view
+# col_name data_type comment
+a int
+c decimal(10,2)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MATERIALIZED_VIEW
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}}
+ numFiles 1
+ numRows 3
+ rawDataSize 348
+ totalSize 332
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+
+# View Information
+View Original Text: SELECT cmv_basetable.a, cmv_basetable_2.c
+ FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
+ WHERE cmv_basetable_2.c > 10.0
+ GROUP BY cmv_basetable.a, cmv_basetable_2.c
+View Expanded Text: SELECT `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+ FROM `default`.`cmv_basetable` JOIN `default`.`cmv_basetable_2` ON (`cmv_basetable`.`a` = `cmv_basetable_2`.`a`)
+ WHERE `cmv_basetable_2`.`c` > 10.0
+ GROUP BY `cmv_basetable`.`a`, `cmv_basetable_2`.`c`
+View Rewrite Enabled: Yes
PREHOOK: query: EXPLAIN
SELECT cmv_basetable.a
FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a)
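The q.out diff above captures the user-visible effect of the patch: ALTER MATERIALIZED VIEW ... REBUILD is now analyzed as a QUERY rather than a CREATE_MATERIALIZED_VIEW DDL, so the plan becomes a regular insert overwrite into the view's table plus column statistics (Stage-4) and a trailing Materialized View Work stage (Stage-6), instead of funneling through the Create View Operator path that held an exclusive DDL lock. A minimal sketch of driving the rebuild from a client, assuming a HiveServer2 instance at the default local JDBC URL (the class name, URL and view name are illustrative, not part of this commit):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RebuildOverJdbc {
  public static void main(String[] args) throws Exception {
    // Illustrative only: requires the Hive JDBC driver on the classpath.
    try (Connection conn =
             DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      // With this patch the statement compiles to the QUERY plan shown above:
      // insert overwrite, column stats, then the Materialized View Work stage.
      stmt.execute("ALTER MATERIALIZED VIEW cmv_mat_view REBUILD");
    }
  }
}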
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index af0fd6b..6bd6aa2 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -9293,6 +9293,265 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
}
+ThriftHiveMetastore_update_creation_metadata_args::~ThriftHiveMetastore_update_creation_metadata_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->dbname);
+ this->__isset.dbname = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_name);
+ this->__isset.tbl_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->creation_metadata.read(iprot);
+ this->__isset.creation_metadata = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_args");
+
+ xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->dbname);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_name);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->creation_metadata.write(oprot);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_update_creation_metadata_pargs::~ThriftHiveMetastore_update_creation_metadata_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_pargs");
+
+ xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->dbname)));
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_name)));
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->creation_metadata)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_update_creation_metadata_result::~ThriftHiveMetastore_update_creation_metadata_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_result");
+
+ if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o2) {
+ xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o2.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o3) {
+ xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->o3.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_update_creation_metadata_presult::~ThriftHiveMetastore_update_creation_metadata_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_update_creation_metadata_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+
ThriftHiveMetastore_get_table_names_by_filter_args::~ThriftHiveMetastore_get_table_names_by_filter_args() throw() {
}
@@ -47026,6 +47285,70 @@ void ThriftHiveMetastoreClient::recv_get_materialization_invalidation_info(std::
throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result");
}
+void ThriftHiveMetastoreClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ send_update_creation_metadata(dbname, tbl_name, creation_metadata);
+ recv_update_creation_metadata();
+}
+
+void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_update_creation_metadata_pargs args;
+ args.dbname = &dbname;
+ args.tbl_name = &tbl_name;
+ args.creation_metadata = &creation_metadata;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_update_creation_metadata()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("update_creation_metadata") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ ThriftHiveMetastore_update_creation_metadata_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.o1) {
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ throw result.o2;
+ }
+ if (result.__isset.o3) {
+ throw result.o3;
+ }
+ return;
+}
+
void ThriftHiveMetastoreClient::get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables)
{
send_get_table_names_by_filter(dbname, filter, max_tables);
@@ -58420,6 +58743,68 @@ void ThriftHiveMetastoreProcessor::process_get_materialization_invalidation_info
}
}
+void ThriftHiveMetastoreProcessor::process_update_creation_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_creation_metadata", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_creation_metadata");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_creation_metadata");
+ }
+
+ ThriftHiveMetastore_update_creation_metadata_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_creation_metadata", bytes);
+ }
+
+ ThriftHiveMetastore_update_creation_metadata_result result;
+ try {
+ iface_->update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata);
+ } catch (MetaException &o1) {
+ result.o1 = o1;
+ result.__isset.o1 = true;
+ } catch (InvalidOperationException &o2) {
+ result.o2 = o2;
+ result.__isset.o2 = true;
+ } catch (UnknownDBException &o3) {
+ result.o3 = o3;
+ result.__isset.o3 = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_creation_metadata");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_creation_metadata");
+ }
+
+ oprot->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_creation_metadata", bytes);
+ }
+}
+
void ThriftHiveMetastoreProcessor::process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
{
void* ctx = NULL;
@@ -70329,6 +70714,98 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_materialization_invalidation_
} // end while(true)
}
+void ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ int32_t seqid = send_update_creation_metadata(dbname, tbl_name, creation_metadata);
+ recv_update_creation_metadata(seqid);
+}
+
+int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata)
+{
+ int32_t cseqid = this->sync_.generateSeqId();
+ ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+ oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_update_creation_metadata_pargs args;
+ args.dbname = &dbname;
+ args.tbl_name = &tbl_name;
+ args.creation_metadata = &creation_metadata;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+
+ sentry.commit();
+ return cseqid;
+}
+
+void ThriftHiveMetastoreConcurrentClient::recv_update_creation_metadata(const int32_t seqid)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ // the read mutex gets dropped and reacquired as part of waitForWork()
+ // The destructor of this sentry wakes up other clients
+ ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+ while(true) {
+ if(!this->sync_.getPending(fname, mtype, rseqid)) {
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ }
+ if(seqid == rseqid) {
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ sentry.commit();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("update_creation_metadata") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ // in a bad state, don't commit
+ using ::apache::thrift::protocol::TProtocolException;
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ }
+ ThriftHiveMetastore_update_creation_metadata_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.o1) {
+ sentry.commit();
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ sentry.commit();
+ throw result.o2;
+ }
+ if (result.__isset.o3) {
+ sentry.commit();
+ throw result.o3;
+ }
+ sentry.commit();
+ return;
+ }
+ // seqid != rseqid
+ this->sync_.updatePending(fname, mtype, rseqid);
+
+ // this will temporarily unlock the readMutex, and let other clients get work done
+ this->sync_.waitForWork(seqid);
+ } // end while(true)
+}
+
void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables)
{
int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables);
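The C++ stubs above are one of several generated bindings for the new update_creation_metadata RPC; the Java client gains the equivalent method (HiveMetaStoreClient.java in the stat list below). A minimal sketch of the call from Java, assuming a reachable metastore; the table names and transaction list value are illustrative, and the CreationMetadata constructor is the same one DDLTask previously invoked inline:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;

import java.util.HashSet;
import java.util.Set;

public class UpdateCreationMetadataSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    Set<String> tablesUsed = new HashSet<>();
    tablesUsed.add("default.cmv_basetable");    // illustrative
    tablesUsed.add("default.cmv_basetable_2");  // illustrative
    CreationMetadata cm =
        new CreationMetadata("default", "cmv_mat_view", tablesUsed);
    // Transaction snapshot taken at rebuild time; real callers read it from
    // conf.get(ValidTxnList.VALID_TXNS_KEY), as DDLTask used to.
    cm.setValidTxnList("42:9223372036854775807::");
    client.updateCreationMetadata("default", "cmv_mat_view", cm);
    client.close();
  }
}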
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index bfa17eb..2466498 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -59,6 +59,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0;
virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0;
virtual void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
+ virtual void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0;
virtual void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0;
virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0;
virtual void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0;
@@ -345,6 +346,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
void get_materialization_invalidation_info(std::map<std::string, Materialization> & /* _return */, const std::string& /* dbname */, const std::vector<std::string> & /* tbl_names */) {
return;
}
+ void update_creation_metadata(const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) {
+ return;
+ }
void get_table_names_by_filter(std::vector<std::string> & /* _return */, const std::string& /* dbname */, const std::string& /* filter */, const int16_t /* max_tables */) {
return;
}
@@ -5339,6 +5343,140 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_presult {
};
+typedef struct _ThriftHiveMetastore_update_creation_metadata_args__isset {
+ _ThriftHiveMetastore_update_creation_metadata_args__isset() : dbname(false), tbl_name(false), creation_metadata(false) {}
+ bool dbname :1;
+ bool tbl_name :1;
+ bool creation_metadata :1;
+} _ThriftHiveMetastore_update_creation_metadata_args__isset;
+
+class ThriftHiveMetastore_update_creation_metadata_args {
+ public:
+
+ ThriftHiveMetastore_update_creation_metadata_args(const ThriftHiveMetastore_update_creation_metadata_args&);
+ ThriftHiveMetastore_update_creation_metadata_args& operator=(const ThriftHiveMetastore_update_creation_metadata_args&);
+ ThriftHiveMetastore_update_creation_metadata_args() : dbname(), tbl_name() {
+ }
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_args() throw();
+ std::string dbname;
+ std::string tbl_name;
+ CreationMetadata creation_metadata;
+
+ _ThriftHiveMetastore_update_creation_metadata_args__isset __isset;
+
+ void __set_dbname(const std::string& val);
+
+ void __set_tbl_name(const std::string& val);
+
+ void __set_creation_metadata(const CreationMetadata& val);
+
+ bool operator == (const ThriftHiveMetastore_update_creation_metadata_args & rhs) const
+ {
+ if (!(dbname == rhs.dbname))
+ return false;
+ if (!(tbl_name == rhs.tbl_name))
+ return false;
+ if (!(creation_metadata == rhs.creation_metadata))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_update_creation_metadata_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_update_creation_metadata_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_update_creation_metadata_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_pargs() throw();
+ const std::string* dbname;
+ const std::string* tbl_name;
+ const CreationMetadata* creation_metadata;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_update_creation_metadata_result__isset {
+ _ThriftHiveMetastore_update_creation_metadata_result__isset() : o1(false), o2(false), o3(false) {}
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_update_creation_metadata_result__isset;
+
+class ThriftHiveMetastore_update_creation_metadata_result {
+ public:
+
+ ThriftHiveMetastore_update_creation_metadata_result(const ThriftHiveMetastore_update_creation_metadata_result&);
+ ThriftHiveMetastore_update_creation_metadata_result& operator=(const ThriftHiveMetastore_update_creation_metadata_result&);
+ ThriftHiveMetastore_update_creation_metadata_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_result() throw();
+ MetaException o1;
+ InvalidOperationException o2;
+ UnknownDBException o3;
+
+ _ThriftHiveMetastore_update_creation_metadata_result__isset __isset;
+
+ void __set_o1(const MetaException& val);
+
+ void __set_o2(const InvalidOperationException& val);
+
+ void __set_o3(const UnknownDBException& val);
+
+ bool operator == (const ThriftHiveMetastore_update_creation_metadata_result & rhs) const
+ {
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_update_creation_metadata_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_update_creation_metadata_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_update_creation_metadata_presult__isset {
+ _ThriftHiveMetastore_update_creation_metadata_presult__isset() : o1(false), o2(false), o3(false) {}
+ bool o1 :1;
+ bool o2 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_update_creation_metadata_presult__isset;
+
+class ThriftHiveMetastore_update_creation_metadata_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_update_creation_metadata_presult() throw();
+ MetaException o1;
+ InvalidOperationException o2;
+ UnknownDBException o3;
+
+ _ThriftHiveMetastore_update_creation_metadata_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
typedef struct _ThriftHiveMetastore_get_table_names_by_filter_args__isset {
_ThriftHiveMetastore_get_table_names_by_filter_args__isset() : dbname(false), filter(false), max_tables(true) {}
bool dbname :1;
@@ -23282,6 +23420,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
void send_get_materialization_invalidation_info(const std::string& dbname, const std::vector<std::string> & tbl_names);
void recv_get_materialization_invalidation_info(std::map<std::string, Materialization> & _return);
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ void send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ void recv_update_creation_metadata();
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
void recv_get_table_names_by_filter(std::vector<std::string> & _return);
@@ -23761,6 +23902,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
void process_get_table_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_table_objects_by_name_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_materialization_invalidation_info(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_update_creation_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_alter_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_alter_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -23946,6 +24088,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
processMap_["get_table_req"] = &ThriftHiveMetastoreProcessor::process_get_table_req;
processMap_["get_table_objects_by_name_req"] = &ThriftHiveMetastoreProcessor::process_get_table_objects_by_name_req;
processMap_["get_materialization_invalidation_info"] = &ThriftHiveMetastoreProcessor::process_get_materialization_invalidation_info;
+ processMap_["update_creation_metadata"] = &ThriftHiveMetastoreProcessor::process_update_creation_metadata;
processMap_["get_table_names_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_table_names_by_filter;
processMap_["alter_table"] = &ThriftHiveMetastoreProcessor::process_alter_table;
processMap_["alter_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context;
@@ -24476,6 +24619,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
return;
}
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata);
+ }
+ ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata);
+ }
+
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) {
size_t sz = ifaces_.size();
size_t i = 0;
@@ -25994,6 +26146,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
int32_t send_get_materialization_invalidation_info(const std::string& dbname, const std::vector<std::string> & tbl_names);
void recv_get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const int32_t seqid);
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ int32_t send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
+ void recv_update_creation_metadata(const int32_t seqid);
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
int32_t send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
void recv_get_table_names_by_filter(std::vector<std::string> & _return, const int32_t seqid);
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index cf9a171..f5dc9f0 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -207,6 +207,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
printf("get_materialization_invalidation_info\n");
}
+ void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) {
+ // Your implementation goes here
+ printf("update_creation_metadata\n");
+ }
+
void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) {
// Your implementation goes here
printf("get_table_names_by_filter\n");
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index aadf8f1..8f04b9d 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -21187,14 +21187,15 @@ Materialization::~Materialization() throw() {
}
-void Materialization::__set_materializationTable(const Table& val) {
- this->materializationTable = val;
-}
-
void Materialization::__set_tablesUsed(const std::set<std::string> & val) {
this->tablesUsed = val;
}
+void Materialization::__set_validTxnList(const std::string& val) {
+ this->validTxnList = val;
+__isset.validTxnList = true;
+}
+
void Materialization::__set_invalidationTime(const int64_t val) {
this->invalidationTime = val;
}
@@ -21211,7 +21212,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
using ::apache::thrift::protocol::TProtocolException;
- bool isset_materializationTable = false;
bool isset_tablesUsed = false;
bool isset_invalidationTime = false;
@@ -21224,14 +21224,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
switch (fid)
{
case 1:
- if (ftype == ::apache::thrift::protocol::T_STRUCT) {
- xfer += this->materializationTable.read(iprot);
- isset_materializationTable = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
- case 2:
if (ftype == ::apache::thrift::protocol::T_SET) {
{
this->tablesUsed.clear();
@@ -21252,6 +21244,14 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validTxnList);
+ this->__isset.validTxnList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
case 3:
if (ftype == ::apache::thrift::protocol::T_I64) {
xfer += iprot->readI64(this->invalidationTime);
@@ -21269,8 +21269,6 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->readStructEnd();
- if (!isset_materializationTable)
- throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_tablesUsed)
throw TProtocolException(TProtocolException::INVALID_DATA);
if (!isset_invalidationTime)
@@ -21283,11 +21281,7 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
xfer += oprot->writeStructBegin("Materialization");
- xfer += oprot->writeFieldBegin("materializationTable", ::apache::thrift::protocol::T_STRUCT, 1);
- xfer += this->materializationTable.write(oprot);
- xfer += oprot->writeFieldEnd();
-
- xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 2);
+ xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1);
{
xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tablesUsed.size()));
std::set<std::string> ::const_iterator _iter880;
@@ -21299,6 +21293,11 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
}
xfer += oprot->writeFieldEnd();
+ if (this->__isset.validTxnList) {
+ xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->validTxnList);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldBegin("invalidationTime", ::apache::thrift::protocol::T_I64, 3);
xfer += oprot->writeI64(this->invalidationTime);
xfer += oprot->writeFieldEnd();
@@ -21310,27 +21309,30 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
void swap(Materialization &a, Materialization &b) {
using ::std::swap;
- swap(a.materializationTable, b.materializationTable);
swap(a.tablesUsed, b.tablesUsed);
+ swap(a.validTxnList, b.validTxnList);
swap(a.invalidationTime, b.invalidationTime);
+ swap(a.__isset, b.__isset);
}
Materialization::Materialization(const Materialization& other881) {
- materializationTable = other881.materializationTable;
tablesUsed = other881.tablesUsed;
+ validTxnList = other881.validTxnList;
invalidationTime = other881.invalidationTime;
+ __isset = other881.__isset;
}
Materialization& Materialization::operator=(const Materialization& other882) {
- materializationTable = other882.materializationTable;
tablesUsed = other882.tablesUsed;
+ validTxnList = other882.validTxnList;
invalidationTime = other882.invalidationTime;
+ __isset = other882.__isset;
return *this;
}
void Materialization::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
out << "Materialization(";
- out << "materializationTable=" << to_string(materializationTable);
- out << ", " << "tablesUsed=" << to_string(tablesUsed);
+ out << "tablesUsed=" << to_string(tablesUsed);
+ out << ", " << "validTxnList="; (__isset.validTxnList ? (out << to_string(validTxnList)) : (out << "<null>"));
out << ", " << "invalidationTime=" << to_string(invalidationTime);
out << ")";
}
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 4c09bc8..c250893 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -8707,32 +8707,40 @@ inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj)
return out;
}
+typedef struct _Materialization__isset {
+ _Materialization__isset() : validTxnList(false) {}
+ bool validTxnList :1;
+} _Materialization__isset;
class Materialization {
public:
Materialization(const Materialization&);
Materialization& operator=(const Materialization&);
- Materialization() : invalidationTime(0) {
+ Materialization() : validTxnList(), invalidationTime(0) {
}
virtual ~Materialization() throw();
- Table materializationTable;
std::set<std::string> tablesUsed;
+ std::string validTxnList;
int64_t invalidationTime;
- void __set_materializationTable(const Table& val);
+ _Materialization__isset __isset;
void __set_tablesUsed(const std::set<std::string> & val);
+ void __set_validTxnList(const std::string& val);
+
void __set_invalidationTime(const int64_t val);
bool operator == (const Materialization & rhs) const
{
- if (!(materializationTable == rhs.materializationTable))
- return false;
if (!(tablesUsed == rhs.tablesUsed))
return false;
+ if (__isset.validTxnList != rhs.__isset.validTxnList)
+ return false;
+ else if (__isset.validTxnList && !(validTxnList == rhs.validTxnList))
+ return false;
if (!(invalidationTime == rhs.invalidationTime))
return false;
return true;
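The same reshaping of Materialization shows up in every binding (Materialization.java in the stat list below): the embedded Table field is gone, and the struct now carries only the set of tables used, an optional validTxnList string, and the invalidation time. A hypothetical construction from Java, using the default constructor and the setters Thrift generates for this field layout; the values are illustrative:

import org.apache.hadoop.hive.metastore.api.Materialization;

import java.util.HashSet;
import java.util.Set;

public class MaterializationSketch {
  public static void main(String[] args) {
    Materialization m = new Materialization();
    Set<String> tablesUsed = new HashSet<>();
    tablesUsed.add("default.cmv_basetable");        // illustrative
    m.setTablesUsed(tablesUsed);
    m.setValidTxnList("42:9223372036854775807::");  // optional field
    m.setInvalidationTime(0L);
    System.out.println(m);                          // generated toString
  }
}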
[5/5] hive git commit: HIVE-18387: Minimize time that REBUILD locks the materialized view (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jc...@apache.org.
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9e27ad08
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9e27ad08
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9e27ad08
Branch: refs/heads/master
Commit: 9e27ad08f579f38231cfe21f92de2b0f4ad4aaeb
Parents: 01f34e4
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Feb 16 02:00:02 2018 -0800
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Feb 16 03:06:54 2018 -0800
----------------------------------------------------------------------
.../listener/DummyRawStoreFailEvent.java | 7 +
.../hive/ql/parse/TestReplicationScenarios.java | 5 +-
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 1 +
.../org/apache/hadoop/hive/ql/HookRunner.java | 2 -
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 60 +-
.../hive/ql/exec/MaterializedViewDesc.java | 57 +
.../hive/ql/exec/MaterializedViewTask.java | 86 +
.../apache/hadoop/hive/ql/exec/TaskFactory.java | 3 +
.../MaterializedViewRegistryUpdateHook.java | 103 -
.../apache/hadoop/hive/ql/metadata/Hive.java | 11 +
.../hive/ql/optimizer/GenMapRedUtils.java | 22 +-
.../hive/ql/parse/BaseSemanticAnalyzer.java | 4 +-
...MaterializedViewRebuildSemanticAnalyzer.java | 99 +
.../hadoop/hive/ql/parse/ParseContext.java | 9 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 56 +-
.../hive/ql/parse/SemanticAnalyzerFactory.java | 3 +-
.../hadoop/hive/ql/parse/TaskCompiler.java | 47 +-
.../hadoop/hive/ql/plan/ImportTableDesc.java | 2 +
.../hadoop/hive/ql/stats/BasicStatsTask.java | 2 -
...alized_view_authorization_rebuild_no_grant.q | 3 -
.../test/queries/clientpositive/druidmini_mv.q | 40 +-
.../materialized_view_create_rewrite_4.q | 6 +
...ed_view_authorization_rebuild_no_grant.q.out | 8 +-
...lized_view_authorization_rebuild_other.q.out | 2 +-
.../clientpositive/druid/druidmini_mv.q.out | 314 +-
.../materialized_view_create_rewrite_3.q.out | 87 +-
...ized_view_create_rewrite_rebuild_dummy.q.out | 87 +-
.../materialized_view_create_rewrite_3.q.out | 88 +-
.../materialized_view_create_rewrite_4.q.out | 209 +-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 477 +++
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 155 +
.../ThriftHiveMetastore_server.skeleton.cpp | 5 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 52 +-
.../gen/thrift/gen-cpp/hive_metastore_types.h | 18 +-
.../hive/metastore/api/Materialization.java | 212 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 2754 +++++++++++++-----
.../gen-php/metastore/ThriftHiveMetastore.php | 321 ++
.../src/gen/thrift/gen-php/metastore/Types.php | 53 +-
.../hive_metastore/ThriftHiveMetastore-remote | 7 +
.../hive_metastore/ThriftHiveMetastore.py | 263 ++
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 35 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 7 +-
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 72 +
.../hadoop/hive/metastore/HiveMetaStore.java | 5 +
.../hive/metastore/HiveMetaStoreClient.java | 7 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 7 +
.../MaterializationInvalidationInfo.java | 5 +-
.../MaterializationsInvalidationCache.java | 57 +-
.../hadoop/hive/metastore/ObjectStore.java | 41 +-
.../apache/hadoop/hive/metastore/RawStore.java | 4 +
.../hive/metastore/cache/CachedStore.java | 7 +
.../src/main/thrift/hive_metastore.thrift | 6 +-
.../DummyRawStoreControlledCommit.java | 7 +
.../DummyRawStoreForJdoConnection.java | 6 +
54 files changed, 4636 insertions(+), 1370 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 78b2637..a3725c5 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -248,6 +249,12 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+ throws MetaException {
+ objectStore.updateCreationMetadata(dbname, tablename, cm);
+ }
+
+ @Override
public void alterTable(String dbName, String name, Table newTable)
throws InvalidObjectException, MetaException {
if (shouldEventSucceed) {
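The delegation above is what gives the fail-injecting test store the new RawStore method. On the server side, the Thrift endpoint presumably lands on the same method; a hypothetical fragment, as it might appear in HiveMetaStore's handler (the real change is the small addition to HiveMetaStore.java in the stat list above, which may differ in detail):

// Hypothetical fragment, not the literal code from this commit.
public void update_creation_metadata(String dbName, String tableName, CreationMetadata cm)
    throws MetaException {
  // Forward straight to the RawStore method added in this patch.
  getMS().updateCreationMetadata(dbName, tableName, cm);
}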
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index d763666..41c89b1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -2228,9 +2228,8 @@ public class TestReplicationScenarios {
run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver);
verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver);
- // TODO: Enable back when HIVE-18387 goes in, as it fixes the issue.
- // The problem is that alter for stats is removing the metadata information.
- // HIVE-18387 rewrites that logic and will fix the issue.
+ // TODO: This does not work because materialized views need the creation metadata
+ // to be updated in case tables used were replicated to a different database.
//run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1", driver);
//verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1, driver);
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 883dcda..6087e02 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -455,6 +455,7 @@ public enum ErrorMsg {
"Alter table with non-partitioned table does not support cascade"),
HIVE_GROUPING_SETS_SIZE_LIMIT(10411,
"Grouping sets size cannot be greater than 64"),
+ REBUILD_NO_MATERIALIZED_VIEW(10412, "Rebuild command only valid for materialized views"),
//========================== 20000 range starts here ========================//
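The new error code backs the tightened REBUILD analysis: rebuild statements now go through the dedicated MaterializedViewRebuildSemanticAnalyzer in the stat list above, which can reject targets that are not materialized views. A hypothetical guard showing how the code might be raised; the helper class is illustrative, not part of this commit:

import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class RebuildTargetCheck {
  // Illustrative: fail fast when REBUILD names a plain table or view.
  static void check(Table table) throws SemanticException {
    if (table == null || !table.isMaterializedView()) {
      throw new SemanticException(ErrorMsg.REBUILD_NO_MATERIALIZED_VIEW.getMsg());
    }
  }
}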
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
index 52e99f9..2a32a51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.Hook;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.HookUtils;
-import org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryUpdateHook;
import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
@@ -83,7 +82,6 @@ public class HookRunner {
if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
queryHooks.add(new MetricsQueryLifeTimeHook());
}
- queryHooks.add(new MaterializedViewRegistryUpdateHook());
}
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 802349f..3716c15 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4923,39 +4923,30 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
throw new HiveException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(crtView.getViewName()));
}
- if (crtView.isMaterialized()) {
- // We need to update the status of the creation signature
- CreationMetadata cm =
- new CreationMetadata(oldview.getDbName(), oldview.getTableName(),
- ImmutableSet.copyOf(crtView.getTablesUsed()));
- cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
- oldview.getTTable().setCreationMetadata(cm);
- db.alterTable(crtView.getViewName(), oldview, null);
- // This is a replace/rebuild, so we need an exclusive lock
- addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_EXCLUSIVE));
- } else {
- // replace existing view
- // remove the existing partition columns from the field schema
- oldview.setViewOriginalText(crtView.getViewOriginalText());
- oldview.setViewExpandedText(crtView.getViewExpandedText());
- oldview.setFields(crtView.getSchema());
- if (crtView.getComment() != null) {
- oldview.setProperty("comment", crtView.getComment());
- }
- if (crtView.getTblProps() != null) {
- oldview.getTTable().getParameters().putAll(crtView.getTblProps());
- }
- oldview.setPartCols(crtView.getPartCols());
- if (crtView.getInputFormat() != null) {
- oldview.setInputFormatClass(crtView.getInputFormat());
- }
- if (crtView.getOutputFormat() != null) {
- oldview.setOutputFormatClass(crtView.getOutputFormat());
- }
- oldview.checkValidity(null);
- db.alterTable(crtView.getViewName(), oldview, null);
- addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
- }
+ // It should not be a materialized view
+ assert !crtView.isMaterialized();
+
+ // replace existing view
+ // remove the existing partition columns from the field schema
+ oldview.setViewOriginalText(crtView.getViewOriginalText());
+ oldview.setViewExpandedText(crtView.getViewExpandedText());
+ oldview.setFields(crtView.getSchema());
+ if (crtView.getComment() != null) {
+ oldview.setProperty("comment", crtView.getComment());
+ }
+ if (crtView.getTblProps() != null) {
+ oldview.getTTable().getParameters().putAll(crtView.getTblProps());
+ }
+ oldview.setPartCols(crtView.getPartCols());
+ if (crtView.getInputFormat() != null) {
+ oldview.setInputFormatClass(crtView.getInputFormat());
+ }
+ if (crtView.getOutputFormat() != null) {
+ oldview.setOutputFormatClass(crtView.getOutputFormat());
+ }
+ oldview.checkValidity(null);
+ db.alterTable(crtView.getViewName(), oldview, null);
+ addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
} else {
// We create new view
Table tbl = crtView.toTable(conf);
@@ -4977,8 +4968,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return 0;
}
- private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException {
-
+ private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException {
if (truncateTableDesc.getColumnIndexes() != null) {
ColumnTruncateWork truncateWork = new ColumnTruncateWork(
truncateTableDesc.getColumnIndexes(), truncateTableDesc.getInputDir(),
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java
new file mode 100644
index 0000000..1e28ca8
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewDesc.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec;
+
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+import java.io.Serializable;
+
+@Explain(displayName = "Materialized View Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class MaterializedViewDesc implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private final String viewName;
+ private final boolean retrieveAndInclude;
+ private final boolean disableRewrite;
+ private final boolean updateCreationMetadata;
+
+ public MaterializedViewDesc(String viewName, boolean retrieveAndInclude, boolean disableRewrite,
+ boolean updateCreationMetadata) {
+ this.viewName = viewName;
+ this.retrieveAndInclude = retrieveAndInclude;
+ this.disableRewrite = disableRewrite;
+ this.updateCreationMetadata = updateCreationMetadata;
+ }
+
+ public String getViewName() {
+ return viewName;
+ }
+
+ public boolean isRetrieveAndInclude() {
+ return retrieveAndInclude;
+ }
+
+ public boolean isDisableRewrite() {
+ return disableRewrite;
+ }
+
+ public boolean isUpdateCreationMetadata() {
+ return updateCreationMetadata;
+ }
+}
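For reference, the patch constructs MaterializedViewDesc with exactly three flag combinations (see the SemanticAnalyzer and TaskCompiler changes below); the view name "db.mv" is a placeholder:

    // CREATE MATERIALIZED VIEW ... ENABLE REWRITE: add the view to the rewriting registry
    new MaterializedViewDesc("db.mv", true, false, false);
    // ALTER MATERIALIZED VIEW ... DISABLE REWRITE: drop the view from the registry
    new MaterializedViewDesc("db.mv", false, true, false);
    // ALTER MATERIALIZED VIEW ... REBUILD: refresh the stored creation metadata
    new MaterializedViewDesc("db.mv", false, false, true);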
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
new file mode 100644
index 0000000..2b345d6
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
+
+import java.io.Serializable;
+
+/**
+ * This task performs maintenance work for materialized views. In particular, it adds
+ * the materialized view to or removes it from the registry if needed, or registers
+ * new creation metadata.
+ */
+public class MaterializedViewTask extends Task<MaterializedViewDesc> implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ public MaterializedViewTask() {
+ super();
+ }
+
+ @Override
+ public int execute(DriverContext driverContext) {
+ if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
+ return 0;
+ }
+ try {
+ if (getWork().isRetrieveAndInclude()) {
+ Hive db = Hive.get(conf);
+ Table mvTable = db.getTable(getWork().getViewName());
+ HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable);
+ } else if (getWork().isDisableRewrite()) {
+ // Rewriting is being disabled, remove the view from the registry
+ String[] names = getWork().getViewName().split("\\.");
+ HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
+ } else if (getWork().isUpdateCreationMetadata()) {
+ // We need to update the creation signature (creation metadata) stored for the view
+ Hive db = Hive.get(conf);
+ Table mvTable = db.getTable(getWork().getViewName());
+ CreationMetadata cm =
+ new CreationMetadata(mvTable.getDbName(), mvTable.getTableName(),
+ ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed()));
+ cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
+ db.updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm);
+ }
+ } catch (HiveException e) {
+ LOG.debug("Exception during materialized view cache update", e);
+ }
+ return 0;
+ }
+
+ @Override
+ public StageType getType() {
+ return StageType.DDL;
+ }
+
+ @Override
+ public String getName() {
+ return MaterializedViewTask.class.getSimpleName();
+ }
+}
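A minimal sketch of how this task gets scheduled, mirroring the TaskCompiler changes below; `conf` and `leafTask` are assumed to be in scope:

    // Refresh the creation metadata of a rebuilt view once the INSERT OVERWRITE finishes
    MaterializedViewDesc desc = new MaterializedViewDesc("db.mv", false, false, true);
    Task<? extends Serializable> mvTask = TaskFactory.get(desc, conf);
    leafTask.addDependentTask(mvTask);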
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index 83590e2..d049c37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -81,6 +81,9 @@ public final class TaskFactory {
taskvec.add(new TaskTuple<CopyWork>(CopyWork.class, CopyTask.class));
taskvec.add(new TaskTuple<ReplCopyWork>(ReplCopyWork.class, ReplCopyTask.class));
taskvec.add(new TaskTuple<DDLWork>(DDLWork.class, DDLTask.class));
+ taskvec.add(new TaskTuple<MaterializedViewDesc>(
+ MaterializedViewDesc.class,
+ MaterializedViewTask.class));
taskvec.add(new TaskTuple<FunctionWork>(FunctionWork.class,
FunctionTask.class));
taskvec
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
deleted file mode 100644
index e886399..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/MaterializedViewRegistryUpdateHook.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.hooks;
-
-import java.io.Serializable;
-import java.util.List;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskRunner;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Updates the materialized view registry after changes.
- */
-public class MaterializedViewRegistryUpdateHook implements QueryLifeTimeHook {
-
- private static final Logger LOG = LoggerFactory.getLogger(MaterializedViewRegistryUpdateHook.class);
-
- @Override
- public void beforeCompile(QueryLifeTimeHookContext ctx) {
- }
-
- @Override
- public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) {
- }
-
- @Override
- public void beforeExecution(QueryLifeTimeHookContext ctx) {
- }
-
- @Override
- public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) {
- if (hasError) {
- return;
- }
- HiveConf hiveConf = ctx.getHiveConf();
- try {
- List<TaskRunner> completedTasks = ctx.getHookContext().getCompleteTaskList();
- for (TaskRunner taskRunner : completedTasks) {
- Task<? extends Serializable> task = taskRunner.getTask();
- if (task instanceof DDLTask) {
- DDLTask ddlTask = (DDLTask) task;
- DDLWork work = ddlTask.getWork();
- String tableName = null;
- boolean isRewriteEnabled = false;
- if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) {
- tableName = work.getCreateViewDesc().toTable(hiveConf).getFullyQualifiedName();
- isRewriteEnabled = work.getCreateViewDesc().isRewriteEnabled();
- } else if (work.getAlterMaterializedViewDesc() != null) {
- tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName();
- isRewriteEnabled = work.getAlterMaterializedViewDesc().isRewriteEnable();
- } else {
- continue;
- }
-
- if (isRewriteEnabled) {
- Hive db = Hive.get();
- Table mvTable = db.getTable(tableName);
- HiveMaterializedViewsRegistry.get().createMaterializedView(db.getConf(), mvTable);
- } else if (work.getAlterMaterializedViewDesc() != null) {
- // Disabling rewriting, removing from cache
- String[] names = tableName.split("\\.");
- HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
- }
- }
- }
- } catch (HiveException e) {
- if (HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING)) {
- String message = "Error updating materialized view cache; consider disabling: " + ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING.varname;
- LOG.error(message, e);
- throw new RuntimeException(message, e);
- } else {
- LOG.debug("Exception during materialized view cache update", e);
- }
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index a45cac6..7b7e140 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -90,6 +90,7 @@ import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -124,6 +125,7 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMNullablePool;
@@ -658,6 +660,15 @@ public class Hive {
}
}
+ public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+ throws HiveException {
+ try {
+ getMSC().updateCreationMetadata(dbName, tableName, cm);
+ } catch (TException e) {
+ throw new HiveException("Unable to update creation metadata: " + e.getMessage(), e);
+ }
+ }
+
/**
* Updates the existing partition metadata with the new metadata.
*
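Usage of the new method mirrors the call site in MaterializedViewTask above; `conf` and `mvTable` are assumed to be in scope:

    CreationMetadata cm =
        new CreationMetadata(mvTable.getDbName(), mvTable.getTableName(),
            ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed()));
    cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
    db.updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm);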
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 59c0fe4..3023144 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1509,22 +1509,12 @@ public final class GenMapRedUtils {
table = null;
}
} else if (mvWork.getLoadFileWork().getCreateViewDesc() != null) {
- if (mvWork.getLoadFileWork().getCreateViewDesc().isReplace()) {
- // ALTER MV ... REBUILD
- String tableName = mvWork.getLoadFileWork().getCreateViewDesc().getViewName();
- try {
- table = Hive.get().getTable(tableName);
- } catch (HiveException e) {
- throw new RuntimeException("unexpected; MV should be present already..: " + tableName, e);
- }
- } else {
- // CREATE MATERIALIZED VIEW ...
- try {
- table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf);
- } catch (HiveException e) {
- LOG.debug("can't pre-create table for MV", e);
- table = null;
- }
+ // CREATE MATERIALIZED VIEW ...
+ try {
+ table = mvWork.getLoadFileWork().getCreateViewDesc().toTable(hconf);
+ } catch (HiveException e) {
+ LOG.debug("can't pre-create table for MV", e);
+ table = null;
}
} else {
throw new RuntimeException("unexpected; this should be a CTAS or a CREATE/REBUILD MV - however no desc present");
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index d18dba5..171825e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -1185,9 +1185,7 @@ public abstract class BaseSemanticAnalyzer {
|| ast.getToken().getType() == HiveParser.TOK_TABLE_PARTITION
|| ast.getToken().getType() == HiveParser.TOK_TABTYPE
|| ast.getToken().getType() == HiveParser.TOK_CREATETABLE
- || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW
- || (ast.getToken().getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW &&
- ast.getChild(1).getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD));
+ || ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW);
int childIndex = 0;
numDynParts = 0;
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java
new file mode 100644
index 0000000..75eb50c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.conf.HiveVariableSource;
+import org.apache.hadoop.hive.conf.VariableSubstitution;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * MaterializedViewRebuildSemanticAnalyzer.
+ * Rewrites ALTER MATERIALIZED VIEW _mv_name_ REBUILD statement into
+ * INSERT OVERWRITE TABLE _mv_name_ _mv_query_ .
+ */
+public class MaterializedViewRebuildSemanticAnalyzer extends CalcitePlanner {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MaterializedViewRebuildSemanticAnalyzer.class);
+ private static final LogHelper console = new LogHelper(LOG);
+
+
+ public MaterializedViewRebuildSemanticAnalyzer(QueryState queryState) throws SemanticException {
+ super(queryState);
+ }
+
+
+ @Override
+ public void analyzeInternal(ASTNode ast) throws SemanticException {
+ if (rewrittenRebuild) {
+ super.analyzeInternal(ast);
+ return;
+ }
+
+ String[] qualifiedTableName = getQualifiedTableName((ASTNode) ast.getChild(0));
+ String dbDotTable = getDotName(qualifiedTableName);
+ ASTNode rewrittenAST;
+ // We need to look up the table, get its defining query and then parse it.
+ try {
+ Table tab = getTableObjectByName(dbDotTable, true);
+ if (!tab.isMaterializedView()) {
+ // Cannot rebuild a table or view that is not a materialized view
+ throw new SemanticException(ErrorMsg.REBUILD_NO_MATERIALIZED_VIEW);
+ }
+ // We need to use the expanded text for the materialized view, as it will contain
+ // the qualified table aliases, etc.
+ String viewText = tab.getViewExpandedText();
+ if (viewText.trim().isEmpty()) {
+ throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY);
+ }
+ Context ctx = new Context(queryState.getConf());
+ rewrittenAST = ParseUtils.parse("insert overwrite table `" +
+ dbDotTable + "` " + viewText, ctx);
+ this.ctx.addRewrittenStatementContext(ctx);
+ } catch (Exception e) {
+ throw new SemanticException(e);
+ }
+ rewrittenRebuild = true;
+ LOG.info("Rebuilding view " + dbDotTable);
+ super.analyzeInternal(rewrittenAST);
+ }
+}
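Concretely, for a hypothetical view the rewrite re-parses a statement like the one below; the expanded view text is illustrative only, `ctx` is the compilation Context created in the analyzer above:

    String dbDotTable = "default.cmv_mat_view";   // hypothetical view
    String viewText =                             // tab.getViewExpandedText()
        "select `cmv_basetable`.`a`, `cmv_basetable`.`c` from `default`.`cmv_basetable`";
    ASTNode rewrittenAST =
        ParseUtils.parse("insert overwrite table `" + dbDotTable + "` " + viewText, ctx);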
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 4c41920..d890b31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.exec.GroupByOperator;
import org.apache.hadoop.hive.ql.exec.JoinOperator;
import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
@@ -118,6 +119,7 @@ public class ParseContext {
private AnalyzeRewriteContext analyzeRewrite;
private CreateTableDesc createTableDesc;
private CreateViewDesc createViewDesc;
+ private MaterializedViewDesc materializedViewUpdateDesc;
private boolean reduceSinkAddedBySortedDynPartition;
private Map<SelectOperator, Table> viewProjectToViewSchema;
@@ -194,7 +196,7 @@ public class ParseContext {
Map<String, ReadEntity> viewAliasToInput,
List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting,
AnalyzeRewriteContext analyzeRewrite, CreateTableDesc createTableDesc,
- CreateViewDesc createViewDesc, QueryProperties queryProperties,
+ CreateViewDesc createViewDesc, MaterializedViewDesc materializedViewUpdateDesc, QueryProperties queryProperties,
Map<SelectOperator, Table> viewProjectToTableSchema, Set<FileSinkDesc> acidFileSinks) {
this.queryState = queryState;
this.conf = queryState.getConf();
@@ -225,6 +227,7 @@ public class ParseContext {
this.analyzeRewrite = analyzeRewrite;
this.createTableDesc = createTableDesc;
this.createViewDesc = createViewDesc;
+ this.materializedViewUpdateDesc = materializedViewUpdateDesc;
this.queryProperties = queryProperties;
this.viewProjectToViewSchema = viewProjectToTableSchema;
this.needViewColumnAuthorization = viewProjectToTableSchema != null
@@ -605,6 +608,10 @@ public class ParseContext {
return createViewDesc;
}
+ public MaterializedViewDesc getMaterializedViewUpdateDesc() {
+ return materializedViewUpdateDesc;
+ }
+
public void setReduceSinkAddedBySortedDynPartition(
final boolean reduceSinkAddedBySortedDynPartition) {
this.reduceSinkAddedBySortedDynPartition = reduceSinkAddedBySortedDynPartition;
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9d77f49..19fc6a9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -43,6 +43,7 @@ import java.util.UUID;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
+import com.google.common.collect.ImmutableSet;
import org.antlr.runtime.ClassicToken;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.TokenRewriteStream;
@@ -63,12 +64,14 @@ import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
+import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -98,6 +101,7 @@ import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.GroupByOperator;
import org.apache.hadoop.hive.ql.exec.JoinOperator;
import org.apache.hadoop.hive.ql.exec.LimitOperator;
+import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.OperatorFactory;
import org.apache.hadoop.hive.ql.exec.RecordReader;
@@ -308,6 +312,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
Map<String, PrunedPartitionList> prunedPartitions;
protected List<FieldSchema> resultSchema;
protected CreateViewDesc createVwDesc;
+ protected MaterializedViewDesc materializedViewUpdateDesc;
protected ArrayList<String> viewsExpanded;
protected ASTNode viewSelect;
protected final UnparseTranslator unparseTranslator;
@@ -330,6 +335,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// flag for no scan during analyze ... compute statistics
protected boolean noscan;
+ // whether this statement is the rewritten form of a materialized view rebuild
+ protected boolean rewrittenRebuild = false;
+
protected volatile boolean disableJoinMerge = false;
protected final boolean defaultJoinMerge;
@@ -453,6 +461,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
nameToSplitSample.clear();
resultSchema = null;
createVwDesc = null;
+ materializedViewUpdateDesc = null;
viewsExpanded = null;
viewSelect = null;
ctesExpanded = null;
@@ -490,11 +499,13 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
new HashSet<JoinOperator>(joinContext.keySet()),
new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()),
- loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx,
+ loadTableWork, loadFileWork, columnStatsAutoGatherContexts,
+ ctx, idToTableNameMap, destTableId, uCtx,
listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject,
opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
- analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
+ analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc,
+ queryProperties, viewProjectToTableSchema, acidFileSinks);
}
public CompilationOpContext getOpContext() {
@@ -1984,7 +1995,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
switch (ast.getToken().getType()) {
case HiveParser.TOK_TAB: {
TableSpec ts = new TableSpec(db, conf, ast);
- if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
+ if (ts.tableHandle.isView() ||
+ (!rewrittenRebuild && ts.tableHandle.isMaterializedView())) {
throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
}
@@ -6899,6 +6911,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
createInsertDesc(dest_tab, overwrite);
}
+ if (dest_tab.isMaterializedView()) {
+ materializedViewUpdateDesc = new MaterializedViewDesc(
+ dest_tab.getFullyQualifiedName(), false, false, true);
+ }
+
WriteEntity output = generateTableWriteEntity(
dest, dest_tab, partSpec, ltd, dpCtx, isNonNativeTable);
ctx.getLoadTableOutputMap().put(ltd, output);
@@ -7465,7 +7482,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return dpCtx;
}
-
private void createInsertDesc(Table table, boolean overwrite) {
Task<? extends Serializable>[] tasks = new Task[this.rootTasks.size()];
tasks = this.rootTasks.toArray(tasks);
@@ -11207,7 +11223,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
- private Table getTableObjectByName(String tableName, boolean throwException) throws HiveException {
+ protected Table getTableObjectByName(String tableName, boolean throwException) throws HiveException {
if (!tabNameToTabObject.containsKey(tableName)) {
Table table = db.getTable(tableName, throwException);
if (table != null) {
@@ -11475,8 +11491,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
// 3. analyze create view command
if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW ||
ast.getToken().getType() == HiveParser.TOK_CREATE_MATERIALIZED_VIEW ||
- (ast.getToken().getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW &&
- ast.getChild(1).getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD) ||
(ast.getToken().getType() == HiveParser.TOK_ALTERVIEW &&
ast.getChild(1).getType() == HiveParser.TOK_QUERY)) {
child = analyzeCreateView(ast, qb, plannerCtx);
@@ -11702,7 +11716,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner,
globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
- analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
+ analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc,
+ queryProperties, viewProjectToTableSchema, acidFileSinks);
// Set the semijoin hints in parse context
pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
@@ -12771,10 +12786,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
case HiveParser.TOK_ORREPLACE:
orReplace = true;
break;
- case HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD:
- isMaterialized = true;
- isRebuild = true;
- break;
case HiveParser.TOK_QUERY:
// For CBO
if (plannerCtx != null) {
@@ -12850,27 +12861,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
qb.setViewDesc(createVwDesc);
- if (isRebuild) {
- // We need to go lookup the table and get the select statement and then parse it.
- try {
- Table tab = getTableObjectByName(dbDotTable, true);
- // We need to use the expanded text for the materialized view, as it will contain
- // the qualified table aliases, etc.
- String viewText = tab.getViewExpandedText();
- if (viewText.trim().isEmpty()) {
- throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY);
- }
- Context ctx = new Context(queryState.getConf());
- selectStmt = ParseUtils.parse(viewText, ctx);
- // For CBO
- if (plannerCtx != null) {
- plannerCtx.setViewToken(selectStmt);
- }
- } catch (Exception e) {
- throw new SemanticException(e);
- }
- }
-
return selectStmt;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 34963ff..78f83ef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -277,8 +277,7 @@ public final class SemanticAnalyzerFactory {
case HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD:
opType = commandType.get(child.getType());
queryState.setCommandType(opType);
- return HiveConf.getBoolVar(queryState.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED) ?
- new CalcitePlanner(queryState) : new SemanticAnalyzer(queryState);
+ return new MaterializedViewRebuildSemanticAnalyzer(queryState);
}
// Operation not recognized, set to null and let upper level handle this case
queryState.setCommandType(null);
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 3122db8..5e94bb7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -28,6 +28,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.hadoop.hive.ql.exec.DDLTask;
+import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -322,9 +324,7 @@ public abstract class TaskCompiler {
if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization()) {
// generate a DDL task and make it a dependent task of the leaf
CreateTableDesc crtTblDesc = pCtx.getCreateTable();
-
crtTblDesc.validate(conf);
-
Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(
inputs, outputs, crtTblDesc), conf);
patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask);
@@ -334,6 +334,16 @@ public abstract class TaskCompiler {
Task<? extends Serializable> crtViewTask = TaskFactory.get(new DDLWork(
inputs, outputs, viewDesc), conf);
patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask);
+ } else if (pCtx.getMaterializedViewUpdateDesc() != null) {
+ // If there is a materialized view update desc, we introduce it at the end
+ // of the task tree.
+ MaterializedViewDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
+ Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<>();
+ getLeafTasks(rootTasks, leafTasks);
+ Task<? extends Serializable> materializedViewTask = TaskFactory.get(materializedViewDesc, conf);
+ for (Task<? extends Serializable> task : leafTasks) {
+ task.addDependentTask(materializedViewTask);
+ }
}
if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
@@ -464,6 +474,7 @@ public abstract class TaskCompiler {
HashSet<Task<? extends Serializable>> leaves = new LinkedHashSet<>();
getLeafTasks(rootTasks, leaves);
assert (leaves.size() > 0);
+ Task<? extends Serializable> targetTask = createTask;
for (Task<? extends Serializable> task : leaves) {
if (task instanceof StatsTask) {
// StatsTask require table to already exist
@@ -474,10 +485,36 @@ public abstract class TaskCompiler {
parentOfCrtTblTask.removeDependentTask(task);
}
createTask.addDependentTask(task);
+ targetTask = task;
} else {
task.addDependentTask(createTask);
}
}
+
+ // Add a task to insert the materialized view into, or delete it from, the registry if needed
+ if (createTask instanceof DDLTask) {
+ DDLTask ddlTask = (DDLTask) createTask;
+ DDLWork work = ddlTask.getWork();
+ String tableName = null;
+ boolean retrieveAndInclude = false;
+ boolean disableRewrite = false;
+ if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) {
+ tableName = work.getCreateViewDesc().getViewName();
+ retrieveAndInclude = work.getCreateViewDesc().isRewriteEnabled();
+ } else if (work.getAlterMaterializedViewDesc() != null) {
+ tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName();
+ if (work.getAlterMaterializedViewDesc().isRewriteEnable()) {
+ retrieveAndInclude = true;
+ } else {
+ disableRewrite = true;
+ }
+ } else {
+ return;
+ }
+ targetTask.addDependentTask(
+ TaskFactory.get(
+ new MaterializedViewDesc(tableName, retrieveAndInclude, disableRewrite, false), conf));
+ }
}
/**
@@ -588,14 +625,16 @@ public abstract class TaskCompiler {
ParseContext clone = new ParseContext(queryState,
pCtx.getOpToPartPruner(), pCtx.getOpToPartList(), pCtx.getTopOps(),
pCtx.getJoinOps(), pCtx.getSmbMapJoinOps(),
- pCtx.getLoadTableWork(), pCtx.getLoadFileWork(), pCtx.getColumnStatsAutoGatherContexts(), pCtx.getContext(),
+ pCtx.getLoadTableWork(), pCtx.getLoadFileWork(),
+ pCtx.getColumnStatsAutoGatherContexts(), pCtx.getContext(),
pCtx.getIdToTableNameMap(), pCtx.getDestTableId(), pCtx.getUCtx(),
pCtx.getListMapJoinOpsNoReducer(),
pCtx.getPrunedPartitions(), pCtx.getTabNameToTabObject(), pCtx.getOpToSamplePruner(), pCtx.getGlobalLimitCtx(),
pCtx.getNameToSplitSample(), pCtx.getSemanticInputs(), rootTasks,
pCtx.getOpToPartToSkewedPruner(), pCtx.getViewAliasToInput(),
pCtx.getReduceSinkOperatorsAddedByEnforceBucketingSorting(),
- pCtx.getAnalyzeRewrite(), pCtx.getCreateTable(), pCtx.getCreateViewDesc(),
+ pCtx.getAnalyzeRewrite(), pCtx.getCreateTable(),
+ pCtx.getCreateViewDesc(), pCtx.getMaterializedViewUpdateDesc(),
pCtx.getQueryProperties(), pCtx.getViewProjectToTableSchema(),
pCtx.getAcidSinks());
clone.setFetchTask(pCtx.getFetchTask());
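Distilled, the TaskCompiler addition hangs the materialized view update task off every leaf of the task DAG, so the registry/metadata update only runs after the rebuild has written its data; this sketch uses illustrative variable names:

    // Collect the current leaves of the physical plan
    Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<>();
    getLeafTasks(rootTasks, leafTasks);
    // One MaterializedViewTask instance, depended on by every leaf
    Task<? extends Serializable> mvTask =
        TaskFactory.get(pCtx.getMaterializedViewUpdateDesc(), conf);
    for (Task<? extends Serializable> task : leafTasks) {
      task.addDependentTask(mvTask);
    }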
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index aef83b8..fcbac7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -101,6 +101,8 @@ public class ImportTableDesc {
table.getSd().getSerdeInfo().getSerializationLib(),
null, // storagehandler passed as table params
table.getSd().getSerdeInfo().getParameters());
+ // TODO: If the DB name from the creation metadata for any of the tables has changed,
+ // we should update it. Currently it refers to the source database name.
this.createViewDesc.setTablesUsed(table.getCreationMetadata() != null ?
table.getCreationMetadata().getTablesUsed() : ImmutableSet.of());
} else {
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
index b483790..1d7660e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -252,8 +252,6 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
if (res == null) {
return 0;
}
- // Stats task should not set creation signature
- res.getTTable().unsetCreationMetadata();
db.alterTable(tableFullName, res, environmentContext);
if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q b/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
index a2e7d38..0ff50b0 100644
--- a/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
+++ b/ql/src/test/queries/clientnegative/materialized_view_authorization_rebuild_no_grant.q
@@ -14,7 +14,4 @@ set user.name=user2;
create materialized view amvrng_mat_view as select a, c from amvrng_table;
set user.name=user1;
-revoke grant option for select on table amvrng_table from user user2;
-
-set user.name=user2;
alter materialized view amvrng_mat_view rebuild;
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/queries/clientpositive/druidmini_mv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druidmini_mv.q b/ql/src/test/queries/clientpositive/druidmini_mv.q
index 9f8500f..21653b7 100644
--- a/ql/src/test/queries/clientpositive/druidmini_mv.q
+++ b/ql/src/test/queries/clientpositive/druidmini_mv.q
@@ -5,20 +5,29 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
set hive.strict.checks.cartesian.product=false;
set hive.materializedview.rewriting=true;
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into cmv_basetable values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1);
+CREATE TABLE cmv_basetable
+STORED AS orc
+TBLPROPERTIES ('transactional'='true')
+AS
+SELECT cast(current_timestamp() AS timestamp) AS t,
+ cast(a AS int) AS a,
+ cast(b AS varchar(256)) AS b,
+ cast(c AS double) AS c,
+ cast(d AS int) AS d
+FROM TABLE (
+ VALUES
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1),
+ (3, 'charlie', 15.8, 1)) as q (a, b, c, d);
CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 2;
@@ -30,7 +39,7 @@ CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWRITE
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 3;
@@ -61,7 +70,7 @@ SELECT * FROM (
ON table1.a = table2.a);
INSERT INTO cmv_basetable VALUES
- (3, 'charlie', 15.8, 1);
+ (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1);
-- TODO: CANNOT USE THE VIEW, IT IS OUTDATED
EXPLAIN
@@ -77,8 +86,13 @@ SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE d = 3) table2
ON table1.a = table2.a);
--- REBUILD: TODO FOR MVS USING CUSTOM STORAGE HANDLERS
--- ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+-- REBUILD
+EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD;
+
+ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD;
+
+SHOW TBLPROPERTIES cmv_mat_view2;
-- NOW IT CAN BE USED AGAIN
EXPLAIN
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
index efc65c4..c7f050b 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q
@@ -36,6 +36,8 @@ CREATE MATERIALIZED VIEW cmv_mat_view AS
WHERE cmv_basetable_2.c > 10.0
GROUP BY cmv_basetable.a, cmv_basetable_2.c;
+DESCRIBE FORMATTED cmv_mat_view;
+
-- CANNOT USE THE VIEW, IT IS DISABLED FOR REWRITE
EXPLAIN
SELECT cmv_basetable.a
@@ -59,6 +61,8 @@ ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE;
ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE;
+DESCRIBE FORMATTED cmv_mat_view;
+
-- CANNOT USE THE VIEW, IT IS OUTDATED
EXPLAIN
SELECT cmv_basetable.a
@@ -77,6 +81,8 @@ ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+DESCRIBE FORMATTED cmv_mat_view;
+
-- NOW IT CAN BE USED AGAIN
EXPLAIN
SELECT cmv_basetable.a
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_no_grant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_no_grant.q.out b/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_no_grant.q.out
index 341fcea..f8dd4a9 100644
--- a/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_no_grant.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_no_grant.q.out
@@ -33,10 +33,4 @@ POSTHOOK: type: CREATE_MATERIALIZED_VIEW
POSTHOOK: Input: default@amvrng_table
POSTHOOK: Output: database:default
POSTHOOK: Output: default@amvrng_mat_view
-PREHOOK: query: revoke grant option for select on table amvrng_table from user user2
-PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: default@amvrng_table
-POSTHOOK: query: revoke grant option for select on table amvrng_table from user user2
-POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: default@amvrng_table
-FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation CREATE_MATERIALIZED_VIEW [[SELECT with grant] on Object [type=TABLE_OR_VIEW, name=default.amvrng_table]]
+FAILED: HiveAccessControlException Permission denied: Principal [name=user1, type=USER] does not have following privileges for operation QUERY [[INSERT, DELETE] on Object [type=TABLE_OR_VIEW, name=default.amvrng_mat_view, action=INSERT_OVERWRITE]]
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_other.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_other.q.out b/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_other.q.out
index 97345bf..0bde682 100644
--- a/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_other.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_authorization_rebuild_other.q.out
@@ -27,4 +27,4 @@ POSTHOOK: type: CREATE_MATERIALIZED_VIEW
POSTHOOK: Input: default@amvro_table
POSTHOOK: Output: database:default
POSTHOOK: Output: default@amvro_mat_view
-FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation CREATE_MATERIALIZED_VIEW [[SELECT with grant] on Object [type=TABLE_OR_VIEW, name=default.amvro_table]]
+FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation QUERY [[INSERT, DELETE] on Object [type=TABLE_OR_VIEW, name=default.amvro_mat_view, action=INSERT_OVERWRITE], [SELECT] on Object [type=TABLE_OR_VIEW, name=default.amvro_table]]
http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index 294b84a..efd6c59 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -1,38 +1,55 @@
-PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_basetable
-POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_basetable
-PREHOOK: query: insert into cmv_basetable values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-PREHOOK: type: QUERY
+PREHOOK: query: CREATE TABLE cmv_basetable
+STORED AS orc
+TBLPROPERTIES ('transactional'='true')
+AS
+SELECT cast(current_timestamp() AS timestamp) AS t,
+ cast(a AS int) AS a,
+ cast(b AS varchar(256)) AS b,
+ cast(c AS double) AS c,
+ cast(d AS int) AS d
+FROM TABLE (
+ VALUES
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1),
+ (3, 'charlie', 15.8, 1)) as q (a, b, c, d)
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
PREHOOK: Output: default@cmv_basetable
-POSTHOOK: query: insert into cmv_basetable values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-POSTHOOK: type: QUERY
+POSTHOOK: query: CREATE TABLE cmv_basetable
+STORED AS orc
+TBLPROPERTIES ('transactional'='true')
+AS
+SELECT cast(current_timestamp() AS timestamp) AS t,
+ cast(a AS int) AS a,
+ cast(b AS varchar(256)) AS b,
+ cast(c AS double) AS c,
+ cast(d AS int) AS d
+FROM TABLE (
+ VALUES
+ (1, 'alfred', 10.30, 2),
+ (2, 'bob', 3.14, 3),
+ (2, 'bonnie', 172342.2, 3),
+ (3, 'calvin', 978.76, 3),
+ (3, 'charlie', 9.8, 1),
+ (3, 'charlie', 15.8, 1)) as q (a, b, c, d)
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_basetable
POSTHOOK: Lineage: cmv_basetable.a SCRIPT []
POSTHOOK: Lineage: cmv_basetable.b SCRIPT []
POSTHOOK: Lineage: cmv_basetable.c SCRIPT []
POSTHOOK: Lineage: cmv_basetable.d SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.t SIMPLE []
PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 2
PREHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -43,7 +60,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, b, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 2
POSTHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -77,7 +94,7 @@ PREHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWR
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 3
PREHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -88,7 +105,7 @@ POSTHOOK: query: CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REW
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
AS
-SELECT cast(current_timestamp() as timestamp with local time zone) as `__time`, a, cast(c as double)
+SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c
FROM cmv_basetable
WHERE a = 3
POSTHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -103,7 +120,8 @@ POSTHOOK: query: SELECT a, c FROM cmv_mat_view2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_mat_view2
#### A masked pattern was here ####
-6 988.5599975585938
+3 978.760009765625
+6 25.600000381469727
PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
PREHOOK: type: SHOW_TBLPROPERTIES
POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
@@ -112,7 +130,7 @@ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
druid.datasource default.cmv_mat_view2
druid.segment.granularity HOUR
numFiles 0
-numRows 2
+numRows 3
rawDataSize 0
storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler
totalSize 0
@@ -137,17 +155,17 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 10770 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 3 (type: int), c (type: decimal(10,2))
+ expressions: 3 (type: int), c (type: double)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -171,7 +189,8 @@ WHERE a = 3
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
#### A masked pattern was here ####
-3 9.80
+3 15.8
+3 9.8
3 978.76
Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN
@@ -198,32 +217,32 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 10770 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 3 Data size: 5385 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 10770 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((3 = a) and (d = 3)) (type: boolean)
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1795 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1795 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 9310 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 1 Data size: 1795 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
Reduce Operator Tree:
Join Operator
condition map:
@@ -232,14 +251,14 @@ STAGE PLANS:
0
1
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10773 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2))
+ expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10773 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 18621 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10773 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -268,15 +287,16 @@ POSTHOOK: query: SELECT * FROM (
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
#### A masked pattern was here ####
-3 9.80 3 978.76
+3 15.8 3 978.76
+3 9.8 3 978.76
3 978.76 3 978.76
PREHOOK: query: INSERT INTO cmv_basetable VALUES
- (3, 'charlie', 15.8, 1)
+ (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@cmv_basetable
POSTHOOK: query: INSERT INTO cmv_basetable VALUES
- (3, 'charlie', 15.8, 1)
+ (cast(current_timestamp() AS timestamp), 3, 'charlie', 15.8, 1)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@cmv_basetable
@@ -284,6 +304,7 @@ POSTHOOK: Lineage: cmv_basetable.a SCRIPT []
POSTHOOK: Lineage: cmv_basetable.b SCRIPT []
POSTHOOK: Lineage: cmv_basetable.c SCRIPT []
POSTHOOK: Lineage: cmv_basetable.d SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.t SCRIPT []
Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN
SELECT * FROM (
@@ -309,32 +330,32 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((3 = a) and (d = 3)) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3373 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3373 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 1 Data size: 3373 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
Reduce Operator Tree:
Join Operator
condition map:
@@ -343,14 +364,14 @@ STAGE PLANS:
0
1
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 20242 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2))
+ expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 20242 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 20242 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -379,10 +400,103 @@ POSTHOOK: query: SELECT * FROM (
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
#### A masked pattern was here ####
-3 15.80 3 978.76
-3 9.80 3 978.76
+3 15.8 3 978.76
+3 15.8 3 978.76
+3 9.8 3 978.76
3 978.76 3 978.76
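The REBUILD explained next targets cmv_mat_view2, a Druid-backed materialized view whose definition is not shown in this hunk. Judging from the Stage-3 plan below (filter a = 3; projected columns t, a, b, c; DruidOutputFormat) and the HOUR segment granularity reported by SHOW TBLPROPERTIES further down, it was presumably created along these lines:

-- hypothetical reconstruction; the actual statement lives elsewhere in the test.
-- Druid-backed tables conventionally expose their time column as `__time`.
CREATE MATERIALIZED VIEW cmv_mat_view2
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ('druid.segment.granularity' = 'HOUR')
AS
SELECT cast(t AS timestamp with local time zone) AS `__time`, a, b, c
FROM cmv_basetable
WHERE a = 3;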
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-2
+ Stage-4 depends on stages: Stage-2, Stage-1, Stage-3
+ Stage-1 is a root stage
+ Stage-3 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Alter Table Operator:
+ Alter Table
+ type: drop props
+ old name: default.cmv_mat_view2
+ properties:
+ COLUMN_STATS_ACCURATE
+
+ Stage: Stage-2
+ Insert operator:
+ Insert
+
+ Stage: Stage-4
+ Materialized View Work
+
+ Stage: Stage-1
+ Pre Insert operator:
+ Pre-Insert task
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: cmv_basetable
+ Statistics: Num rows: 6 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a = 3) (type: boolean)
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: CAST( t AS timestamp with local time zone) (type: timestamp with local time zone), 3 (type: int), b (type: varchar(256)), c (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: __time_granularity (type: timestamp)
+ sort order: +
+ Map-reduce partition columns: __time_granularity (type: timestamp)
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: int), VALUE._col2 (type: varchar(256)), VALUE._col3 (type: double), KEY.__time_granularity (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Dp Sort State: PARTITION_SORTED
+ Statistics: Num rows: 3 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
+ output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
+ serde: org.apache.hadoop.hive.druid.serde.DruidSerDe
+ name: default.cmv_mat_view2
+
+PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cmv_basetable
+PREHOOK: Output: default@cmv_mat_view2
+POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Output: default@cmv_mat_view2
+PREHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
+PREHOOK: type: SHOW_TBLPROPERTIES
+POSTHOOK: query: SHOW TBLPROPERTIES cmv_mat_view2
+POSTHOOK: type: SHOW_TBLPROPERTIES
+druid.datasource default.cmv_mat_view2
+druid.segment.granularity HOUR
+#### A masked pattern was here ####
+numFiles 0
+numRows 3
+rawDataSize 0
+storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler
+totalSize 0
+#### A masked pattern was here ####
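With the rebuild done, the view is current again, so the optimizer can answer the table1 side of the join from Druid instead of re-scanning the base table; the EXPLAIN below shows exactly that (a TableScan with alias cmv_mat_view2 carrying a pushed-down druid.query.json). A quick interactive check of the same behaviour, assuming rewriting is enabled in the session:

SET hive.materializedview.rewriting=true; -- assumed on for this test
EXPLAIN
SELECT * FROM (
  (SELECT a, c FROM cmv_basetable WHERE a = 3) table1
  JOIN
  (SELECT a, c FROM cmv_basetable WHERE d = 3) table2
  ON table1.a = table2.a);
-- expect "alias: cmv_mat_view2" in place of the second cmv_basetable scan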
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN
SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE a = 3) table1
@@ -407,32 +521,28 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (a = 3) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: c (type: decimal(10,2))
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
- TableScan
- alias: cmv_basetable
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((3 = a) and (d = 3)) (type: boolean)
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3373 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: c (type: decimal(10,2))
+ expressions: c (type: double)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3373 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 17540 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: decimal(10,2))
+ Statistics: Num rows: 1 Data size: 3373 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
+ TableScan
+ alias: cmv_mat_view2
+ properties:
+ druid.query.json {"queryType":"select","dataSource":"default.cmv_mat_view2","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["c"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.type select
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ value expressions: c (type: double)
Reduce Operator Tree:
Join Operator
condition map:
@@ -440,15 +550,15 @@ STAGE PLANS:
keys:
0
1
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ outputColumnNames: _col1, _col5
+ Statistics: Num rows: 3 Data size: 10122 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2))
+ expressions: 3 (type: int), _col1 (type: double), 3 (type: int), _col5 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10122 Basic stats: PARTIAL Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 35081 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 10122 Basic stats: PARTIAL Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -460,7 +570,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [cmv_mat_view2, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE a = 3) table1
JOIN
@@ -468,6 +578,7 @@ PREHOOK: query: SELECT * FROM (
ON table1.a = table2.a)
PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable
+PREHOOK: Input: default@cmv_mat_view2
#### A masked pattern was here ####
POSTHOOK: query: SELECT * FROM (
(SELECT a, c FROM cmv_basetable WHERE a = 3) table1
@@ -476,10 +587,11 @@ POSTHOOK: query: SELECT * FROM (
ON table1.a = table2.a)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cmv_basetable
+POSTHOOK: Input: default@cmv_mat_view2
#### A masked pattern was here ####
-3 15.80 3 978.76
-3 9.80 3 978.76
-3 978.76 3 978.76
+3 15.800000190734863 3 978.76
+3 25.600000381469727 3 978.76
+3 978.760009765625 3 978.76
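Two things changed in these rows once they are served from the view: every c value picked up float32 rounding noise, because Druid stores the metric as a 32-bit float, and 25.600000381469727 is exactly float32(15.8) + float32(9.8), suggesting one of the 15.8 rows and the 9.8 row were rolled up into the same HOUR bucket. The rounding itself is easy to reproduce (a sketch, not part of the test):

-- 15.8 has no exact binary representation; narrowing to float then widening
-- back to double exposes the value Druid actually stored
SELECT cast(cast(15.8 AS float) AS double);   -- 15.800000190734863
SELECT cast(cast(978.76 AS float) AS double); -- 978.760009765625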
PREHOOK: query: DROP MATERIALIZED VIEW cmv_mat_view
PREHOOK: type: DROP_MATERIALIZED_VIEW
PREHOOK: Input: default@cmv_mat_view