You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ha...@apache.org on 2014/03/05 01:20:57 UTC
svn commit: r1574266 [19/23] - in /hive/trunk:
common/src/java/org/apache/hadoop/hive/conf/
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/ metastore/
metastore/if/ metastore/scripts/upgrade/derby/
metastore/scripts/upgrade/mysql/ meta...
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb Wed Mar 5 00:20:53 2014
@@ -32,6 +32,46 @@ module PartitionEventType
VALID_VALUES = Set.new([LOAD_DONE]).freeze
end
+module TxnState
+ COMMITTED = 1
+ ABORTED = 2
+ OPEN = 3
+ VALUE_MAP = {1 => "COMMITTED", 2 => "ABORTED", 3 => "OPEN"}
+ VALID_VALUES = Set.new([COMMITTED, ABORTED, OPEN]).freeze
+end
+
+module LockLevel
+ DB = 1
+ TABLE = 2
+ PARTITION = 3
+ VALUE_MAP = {1 => "DB", 2 => "TABLE", 3 => "PARTITION"}
+ VALID_VALUES = Set.new([DB, TABLE, PARTITION]).freeze
+end
+
+module LockState
+ ACQUIRED = 1
+ WAITING = 2
+ ABORT = 3
+ NOT_ACQUIRED = 4
+ VALUE_MAP = {1 => "ACQUIRED", 2 => "WAITING", 3 => "ABORT", 4 => "NOT_ACQUIRED"}
+ VALID_VALUES = Set.new([ACQUIRED, WAITING, ABORT, NOT_ACQUIRED]).freeze
+end
+
+module LockType
+ SHARED_READ = 1
+ SHARED_WRITE = 2
+ EXCLUSIVE = 3
+ VALUE_MAP = {1 => "SHARED_READ", 2 => "SHARED_WRITE", 3 => "EXCLUSIVE"}
+ VALID_VALUES = Set.new([SHARED_READ, SHARED_WRITE, EXCLUSIVE]).freeze
+end
+
+module CompactionType
+ MINOR = 1
+ MAJOR = 2
+ VALUE_MAP = {1 => "MINOR", 2 => "MAJOR"}
+ VALID_VALUES = Set.new([MINOR, MAJOR]).freeze
+end
+
module FunctionType
JAVA = 1
VALUE_MAP = {1 => "JAVA"}
@@ -1085,6 +1125,465 @@ class Function
::Thrift::Struct.generate_accessors self
end
+class TxnInfo
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ ID = 1
+ STATE = 2
+ USER = 3
+ HOSTNAME = 4
+
+ FIELDS = {
+ ID => {:type => ::Thrift::Types::I64, :name => 'id'},
+ STATE => {:type => ::Thrift::Types::I32, :name => 'state', :enum_class => ::TxnState},
+ USER => {:type => ::Thrift::Types::STRING, :name => 'user'},
+ HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostname'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field id is unset!') unless @id
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field state is unset!') unless @state
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field user is unset!') unless @user
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field hostname is unset!') unless @hostname
+ unless @state.nil? || ::TxnState::VALID_VALUES.include?(@state)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field state!')
+ end
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class GetOpenTxnsInfoResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ TXN_HIGH_WATER_MARK = 1
+ OPEN_TXNS = 2
+
+ FIELDS = {
+ TXN_HIGH_WATER_MARK => {:type => ::Thrift::Types::I64, :name => 'txn_high_water_mark'},
+ OPEN_TXNS => {:type => ::Thrift::Types::LIST, :name => 'open_txns', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TxnInfo}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txn_high_water_mark is unset!') unless @txn_high_water_mark
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field open_txns is unset!') unless @open_txns
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class GetOpenTxnsResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ TXN_HIGH_WATER_MARK = 1
+ OPEN_TXNS = 2
+
+ FIELDS = {
+ TXN_HIGH_WATER_MARK => {:type => ::Thrift::Types::I64, :name => 'txn_high_water_mark'},
+ OPEN_TXNS => {:type => ::Thrift::Types::SET, :name => 'open_txns', :element => {:type => ::Thrift::Types::I64}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txn_high_water_mark is unset!') unless @txn_high_water_mark
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field open_txns is unset!') unless @open_txns
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class OpenTxnRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ NUM_TXNS = 1
+ USER = 2
+ HOSTNAME = 3
+
+ FIELDS = {
+ NUM_TXNS => {:type => ::Thrift::Types::I32, :name => 'num_txns'},
+ USER => {:type => ::Thrift::Types::STRING, :name => 'user'},
+ HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostname'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field num_txns is unset!') unless @num_txns
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field user is unset!') unless @user
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field hostname is unset!') unless @hostname
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class OpenTxnsResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ TXN_IDS = 1
+
+ FIELDS = {
+ TXN_IDS => {:type => ::Thrift::Types::LIST, :name => 'txn_ids', :element => {:type => ::Thrift::Types::I64}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txn_ids is unset!') unless @txn_ids
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class AbortTxnRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ TXNID = 1
+
+ FIELDS = {
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnid is unset!') unless @txnid
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class CommitTxnRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ TXNID = 1
+
+ FIELDS = {
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnid is unset!') unless @txnid
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class LockComponent
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ TYPE = 1
+ LEVEL = 2
+ DBNAME = 3
+ TABLENAME = 4
+ PARTITIONNAME = 5
+
+ FIELDS = {
+ TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::LockType},
+ LEVEL => {:type => ::Thrift::Types::I32, :name => 'level', :enum_class => ::LockLevel},
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename', :optional => true},
+ PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field type is unset!') unless @type
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field level is unset!') unless @level
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
+ unless @type.nil? || ::LockType::VALID_VALUES.include?(@type)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field type!')
+ end
+ unless @level.nil? || ::LockLevel::VALID_VALUES.include?(@level)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field level!')
+ end
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class LockRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ COMPONENT = 1
+ TXNID = 2
+ USER = 3
+ HOSTNAME = 4
+
+ FIELDS = {
+ COMPONENT => {:type => ::Thrift::Types::LIST, :name => 'component', :element => {:type => ::Thrift::Types::STRUCT, :class => ::LockComponent}},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid', :optional => true},
+ USER => {:type => ::Thrift::Types::STRING, :name => 'user'},
+ HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostname'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field component is unset!') unless @component
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field user is unset!') unless @user
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field hostname is unset!') unless @hostname
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class LockResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ LOCKID = 1
+ STATE = 2
+
+ FIELDS = {
+ LOCKID => {:type => ::Thrift::Types::I64, :name => 'lockid'},
+ STATE => {:type => ::Thrift::Types::I32, :name => 'state', :enum_class => ::LockState}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lockid is unset!') unless @lockid
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field state is unset!') unless @state
+ unless @state.nil? || ::LockState::VALID_VALUES.include?(@state)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field state!')
+ end
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class CheckLockRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ LOCKID = 1
+
+ FIELDS = {
+ LOCKID => {:type => ::Thrift::Types::I64, :name => 'lockid'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lockid is unset!') unless @lockid
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class UnlockRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ LOCKID = 1
+
+ FIELDS = {
+ LOCKID => {:type => ::Thrift::Types::I64, :name => 'lockid'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lockid is unset!') unless @lockid
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class ShowLocksRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class ShowLocksResponseElement
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ LOCKID = 1
+ DBNAME = 2
+ TABLENAME = 3
+ PARTNAME = 4
+ STATE = 5
+ TYPE = 6
+ TXNID = 7
+ LASTHEARTBEAT = 8
+ ACQUIREDAT = 9
+ USER = 10
+ HOSTNAME = 11
+
+ FIELDS = {
+ LOCKID => {:type => ::Thrift::Types::I64, :name => 'lockid'},
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename', :optional => true},
+ PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partname', :optional => true},
+ STATE => {:type => ::Thrift::Types::I32, :name => 'state', :enum_class => ::LockState},
+ TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::LockType},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid', :optional => true},
+ LASTHEARTBEAT => {:type => ::Thrift::Types::I64, :name => 'lastheartbeat'},
+ ACQUIREDAT => {:type => ::Thrift::Types::I64, :name => 'acquiredat', :optional => true},
+ USER => {:type => ::Thrift::Types::STRING, :name => 'user'},
+ HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostname'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lockid is unset!') unless @lockid
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field state is unset!') unless @state
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field type is unset!') unless @type
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lastheartbeat is unset!') unless @lastheartbeat
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field user is unset!') unless @user
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field hostname is unset!') unless @hostname
+ unless @state.nil? || ::LockState::VALID_VALUES.include?(@state)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field state!')
+ end
+ unless @type.nil? || ::LockType::VALID_VALUES.include?(@type)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field type!')
+ end
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class ShowLocksResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ LOCKS = 1
+
+ FIELDS = {
+ LOCKS => {:type => ::Thrift::Types::LIST, :name => 'locks', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ShowLocksResponseElement}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class HeartbeatRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ LOCKID = 1
+ TXNID = 2
+
+ FIELDS = {
+ LOCKID => {:type => ::Thrift::Types::I64, :name => 'lockid', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class CompactionRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TABLENAME = 2
+ PARTITIONNAME = 3
+ TYPE = 4
+ RUNAS = 5
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'},
+ PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname', :optional => true},
+ TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::CompactionType},
+ RUNAS => {:type => ::Thrift::Types::STRING, :name => 'runas', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablename is unset!') unless @tablename
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field type is unset!') unless @type
+ unless @type.nil? || ::CompactionType::VALID_VALUES.include?(@type)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field type!')
+ end
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class ShowCompactRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class ShowCompactResponseElement
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TABLENAME = 2
+ PARTITIONNAME = 3
+ TYPE = 4
+ STATE = 5
+ WORKERID = 6
+ START = 7
+ RUNAS = 8
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'},
+ PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionname'},
+ TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::CompactionType},
+ STATE => {:type => ::Thrift::Types::STRING, :name => 'state'},
+ WORKERID => {:type => ::Thrift::Types::STRING, :name => 'workerid'},
+ START => {:type => ::Thrift::Types::I64, :name => 'start'},
+ RUNAS => {:type => ::Thrift::Types::STRING, :name => 'runAs'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablename is unset!') unless @tablename
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitionname is unset!') unless @partitionname
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field type is unset!') unless @type
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field state is unset!') unless @state
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field workerid is unset!') unless @workerid
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field start is unset!') unless @start
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field runAs is unset!') unless @runAs
+ unless @type.nil? || ::CompactionType::VALID_VALUES.include?(@type)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field type!')
+ end
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class ShowCompactResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ COMPACTS = 1
+
+ FIELDS = {
+ COMPACTS => {:type => ::Thrift::Types::LIST, :name => 'compacts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ShowCompactResponseElement}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field compacts is unset!') unless @compacts
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
class MetaException < ::Thrift::Exception
include ::Thrift::Struct, ::Thrift::Struct_Union
def initialize(message=nil)
@@ -1337,3 +1836,87 @@ class InvalidInputException < ::Thrift::
::Thrift::Struct.generate_accessors self
end
+class NoSuchTxnException < ::Thrift::Exception
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ def initialize(message=nil)
+ super()
+ self.message = message
+ end
+
+ MESSAGE = 1
+
+ FIELDS = {
+ MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class TxnAbortedException < ::Thrift::Exception
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ def initialize(message=nil)
+ super()
+ self.message = message
+ end
+
+ MESSAGE = 1
+
+ FIELDS = {
+ MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class TxnOpenException < ::Thrift::Exception
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ def initialize(message=nil)
+ super()
+ self.message = message
+ end
+
+ MESSAGE = 1
+
+ FIELDS = {
+ MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class NoSuchLockException < ::Thrift::Exception
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ def initialize(message=nil)
+ super()
+ self.message = message
+ end
+
+ MESSAGE = 1
+
+ FIELDS = {
+ MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Wed Mar 5 00:20:53 2014
@@ -1575,6 +1575,194 @@ module ThriftHiveMetastore
return
end
+ def get_open_txns()
+ send_get_open_txns()
+ return recv_get_open_txns()
+ end
+
+ def send_get_open_txns()
+ send_message('get_open_txns', Get_open_txns_args)
+ end
+
+ def recv_get_open_txns()
+ result = receive_message(Get_open_txns_result)
+ return result.success unless result.success.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_open_txns failed: unknown result')
+ end
+
+ def get_open_txns_info()
+ send_get_open_txns_info()
+ return recv_get_open_txns_info()
+ end
+
+ def send_get_open_txns_info()
+ send_message('get_open_txns_info', Get_open_txns_info_args)
+ end
+
+ def recv_get_open_txns_info()
+ result = receive_message(Get_open_txns_info_result)
+ return result.success unless result.success.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_open_txns_info failed: unknown result')
+ end
+
+ def open_txns(rqst)
+ send_open_txns(rqst)
+ return recv_open_txns()
+ end
+
+ def send_open_txns(rqst)
+ send_message('open_txns', Open_txns_args, :rqst => rqst)
+ end
+
+ def recv_open_txns()
+ result = receive_message(Open_txns_result)
+ return result.success unless result.success.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'open_txns failed: unknown result')
+ end
+
+ def abort_txn(rqst)
+ send_abort_txn(rqst)
+ recv_abort_txn()
+ end
+
+ def send_abort_txn(rqst)
+ send_message('abort_txn', Abort_txn_args, :rqst => rqst)
+ end
+
+ def recv_abort_txn()
+ result = receive_message(Abort_txn_result)
+ raise result.o1 unless result.o1.nil?
+ return
+ end
+
+ def commit_txn(rqst)
+ send_commit_txn(rqst)
+ recv_commit_txn()
+ end
+
+ def send_commit_txn(rqst)
+ send_message('commit_txn', Commit_txn_args, :rqst => rqst)
+ end
+
+ def recv_commit_txn()
+ result = receive_message(Commit_txn_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ return
+ end
+
+ def lock(rqst)
+ send_lock(rqst)
+ return recv_lock()
+ end
+
+ def send_lock(rqst)
+ send_message('lock', Lock_args, :rqst => rqst)
+ end
+
+ def recv_lock()
+ result = receive_message(Lock_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'lock failed: unknown result')
+ end
+
+ def check_lock(rqst)
+ send_check_lock(rqst)
+ return recv_check_lock()
+ end
+
+ def send_check_lock(rqst)
+ send_message('check_lock', Check_lock_args, :rqst => rqst)
+ end
+
+ def recv_check_lock()
+ result = receive_message(Check_lock_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'check_lock failed: unknown result')
+ end
+
+ def unlock(rqst)
+ send_unlock(rqst)
+ recv_unlock()
+ end
+
+ def send_unlock(rqst)
+ send_message('unlock', Unlock_args, :rqst => rqst)
+ end
+
+ def recv_unlock()
+ result = receive_message(Unlock_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ return
+ end
+
+ def show_locks(rqst)
+ send_show_locks(rqst)
+ return recv_show_locks()
+ end
+
+ def send_show_locks(rqst)
+ send_message('show_locks', Show_locks_args, :rqst => rqst)
+ end
+
+ def recv_show_locks()
+ result = receive_message(Show_locks_result)
+ return result.success unless result.success.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'show_locks failed: unknown result')
+ end
+
+ def heartbeat(ids)
+ send_heartbeat(ids)
+ recv_heartbeat()
+ end
+
+ def send_heartbeat(ids)
+ send_message('heartbeat', Heartbeat_args, :ids => ids)
+ end
+
+ def recv_heartbeat()
+ result = receive_message(Heartbeat_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ return
+ end
+
+ def compact(rqst)
+ send_compact(rqst)
+ recv_compact()
+ end
+
+ def send_compact(rqst)
+ send_message('compact', Compact_args, :rqst => rqst)
+ end
+
+ def recv_compact()
+ result = receive_message(Compact_result)
+ return
+ end
+
+ def show_compact(rqst)
+ send_show_compact(rqst)
+ return recv_show_compact()
+ end
+
+ def send_show_compact(rqst)
+ send_message('show_compact', Show_compact_args, :rqst => rqst)
+ end
+
+ def recv_show_compact()
+ result = receive_message(Show_compact_result)
+ return result.success unless result.success.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'show_compact failed: unknown result')
+ end
+
end
class Processor < ::FacebookService::Processor
@@ -2812,6 +3000,128 @@ module ThriftHiveMetastore
write_result(result, oprot, 'cancel_delegation_token', seqid)
end
+ def process_get_open_txns(seqid, iprot, oprot)
+ args = read_args(iprot, Get_open_txns_args)
+ result = Get_open_txns_result.new()
+ result.success = @handler.get_open_txns()
+ write_result(result, oprot, 'get_open_txns', seqid)
+ end
+
+ def process_get_open_txns_info(seqid, iprot, oprot)
+ args = read_args(iprot, Get_open_txns_info_args)
+ result = Get_open_txns_info_result.new()
+ result.success = @handler.get_open_txns_info()
+ write_result(result, oprot, 'get_open_txns_info', seqid)
+ end
+
+ def process_open_txns(seqid, iprot, oprot)
+ args = read_args(iprot, Open_txns_args)
+ result = Open_txns_result.new()
+ result.success = @handler.open_txns(args.rqst)
+ write_result(result, oprot, 'open_txns', seqid)
+ end
+
+ def process_abort_txn(seqid, iprot, oprot)
+ args = read_args(iprot, Abort_txn_args)
+ result = Abort_txn_result.new()
+ begin
+ @handler.abort_txn(args.rqst)
+ rescue ::NoSuchTxnException => o1
+ result.o1 = o1
+ end
+ write_result(result, oprot, 'abort_txn', seqid)
+ end
+
+ def process_commit_txn(seqid, iprot, oprot)
+ args = read_args(iprot, Commit_txn_args)
+ result = Commit_txn_result.new()
+ begin
+ @handler.commit_txn(args.rqst)
+ rescue ::NoSuchTxnException => o1
+ result.o1 = o1
+ rescue ::TxnAbortedException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'commit_txn', seqid)
+ end
+
+ def process_lock(seqid, iprot, oprot)
+ args = read_args(iprot, Lock_args)
+ result = Lock_result.new()
+ begin
+ result.success = @handler.lock(args.rqst)
+ rescue ::NoSuchTxnException => o1
+ result.o1 = o1
+ rescue ::TxnAbortedException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'lock', seqid)
+ end
+
+ def process_check_lock(seqid, iprot, oprot)
+ args = read_args(iprot, Check_lock_args)
+ result = Check_lock_result.new()
+ begin
+ result.success = @handler.check_lock(args.rqst)
+ rescue ::NoSuchTxnException => o1
+ result.o1 = o1
+ rescue ::TxnAbortedException => o2
+ result.o2 = o2
+ rescue ::NoSuchLockException => o3
+ result.o3 = o3
+ end
+ write_result(result, oprot, 'check_lock', seqid)
+ end
+
+ def process_unlock(seqid, iprot, oprot)
+ args = read_args(iprot, Unlock_args)
+ result = Unlock_result.new()
+ begin
+ @handler.unlock(args.rqst)
+ rescue ::NoSuchLockException => o1
+ result.o1 = o1
+ rescue ::TxnOpenException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'unlock', seqid)
+ end
+
+ def process_show_locks(seqid, iprot, oprot)
+ args = read_args(iprot, Show_locks_args)
+ result = Show_locks_result.new()
+ result.success = @handler.show_locks(args.rqst)
+ write_result(result, oprot, 'show_locks', seqid)
+ end
+
+ def process_heartbeat(seqid, iprot, oprot)
+ args = read_args(iprot, Heartbeat_args)
+ result = Heartbeat_result.new()
+ begin
+ @handler.heartbeat(args.ids)
+ rescue ::NoSuchLockException => o1
+ result.o1 = o1
+ rescue ::NoSuchTxnException => o2
+ result.o2 = o2
+ rescue ::TxnAbortedException => o3
+ result.o3 = o3
+ end
+ write_result(result, oprot, 'heartbeat', seqid)
+ end
+
+ def process_compact(seqid, iprot, oprot)
+ args = read_args(iprot, Compact_args)
+ result = Compact_result.new()
+ @handler.compact(args.rqst)
+ write_result(result, oprot, 'compact', seqid)
+ end
+
+ def process_show_compact(seqid, iprot, oprot)
+ args = read_args(iprot, Show_compact_args)
+ result = Show_compact_result.new()
+ result.success = @handler.show_compact(args.rqst)
+ write_result(result, oprot, 'show_compact', seqid)
+ end
+
end
# HELPER FUNCTIONS AND STRUCTURES
@@ -6429,5 +6739,404 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Get_open_txns_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Get_open_txns_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetOpenTxnsResponse}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Get_open_txns_info_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Get_open_txns_info_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetOpenTxnsInfoResponse}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Open_txns_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::OpenTxnRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Open_txns_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::OpenTxnsResponse}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Abort_txn_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::AbortTxnRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Abort_txn_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Commit_txn_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::CommitTxnRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Commit_txn_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::TxnAbortedException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Lock_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::LockRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Lock_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::LockResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::TxnAbortedException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Check_lock_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::CheckLockRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Check_lock_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::LockResponse},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::TxnAbortedException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::NoSuchLockException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Unlock_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::UnlockRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Unlock_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchLockException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::TxnOpenException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Show_locks_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::ShowLocksRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Show_locks_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::ShowLocksResponse}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Heartbeat_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ IDS = 1
+
+ FIELDS = {
+ IDS => {:type => ::Thrift::Types::STRUCT, :name => 'ids', :class => ::HeartbeatRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Heartbeat_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O2 = 2
+ O3 = 3
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchLockException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchTxnException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::TxnAbortedException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Compact_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::CompactionRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Compact_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Show_compact_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ RQST = 1
+
+ FIELDS = {
+ RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::ShowCompactRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Show_compact_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::ShowCompactResponse}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
end
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Wed Mar 5 00:20:53 2014
@@ -18,6 +18,8 @@
package org.apache.hadoop.hive.metastore;
+import com.facebook.fb303.FacebookBase;
+import com.facebook.fb303.fb_status;
import static org.apache.commons.lang.StringUtils.join;
import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT;
import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
@@ -60,12 +62,16 @@ import org.apache.hadoop.hive.common.cli
import org.apache.hadoop.hive.common.metrics.Metrics;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
@@ -74,6 +80,9 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
@@ -83,8 +92,14 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
@@ -97,16 +112,24 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
import org.apache.hadoop.hive.metastore.api.TableStatsResult;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.UnlockRequest;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
@@ -149,14 +172,8 @@ import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TServerTransport;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportFactory;
+import org.apache.thrift.transport.*;
-import com.facebook.fb303.FacebookBase;
-import com.facebook.fb303.fb_status;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
@@ -218,6 +235,13 @@ public class HiveMetaStore extends Thrif
}
};
+ private final ThreadLocal<TxnHandler> threadLocalTxn = new ThreadLocal<TxnHandler>() {
+ @Override
+ protected synchronized TxnHandler initialValue() {
+ return null;
+ }
+ };
+
// Thread local configuration is needed as many threads could make changes
// to the conf using the connection hook
private final ThreadLocal<Configuration> threadLocalConf =
@@ -443,6 +467,15 @@ public class HiveMetaStore extends Thrif
return ms;
}
+ private TxnHandler getTxnHandler() {
+ TxnHandler txn = threadLocalTxn.get();
+ if (txn == null) {
+ txn = new TxnHandler(hiveConf);
+ threadLocalTxn.set(txn);
+ }
+ return txn;
+ }
+
private RawStore newRawStore() throws MetaException {
LOG.info(addPrefix("Opening raw store with implemenation class:"
+ rawStoreClassName));
@@ -4706,6 +4739,121 @@ public class HiveMetaStore extends Thrif
return func;
}
+
+ // Transaction and locking methods
+ @Override
+ public GetOpenTxnsResponse get_open_txns() throws TException {
+ try {
+ return getTxnHandler().getOpenTxns();
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ // Transaction and locking methods
+ @Override
+ public GetOpenTxnsInfoResponse get_open_txns_info() throws TException {
+ try {
+ return getTxnHandler().getOpenTxnsInfo();
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public OpenTxnsResponse open_txns(OpenTxnRequest rqst) throws TException {
+ try {
+ return getTxnHandler().openTxns(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public void abort_txn(AbortTxnRequest rqst) throws NoSuchTxnException, TException {
+ try {
+ getTxnHandler().abortTxn(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public void commit_txn(CommitTxnRequest rqst)
+ throws NoSuchTxnException, TxnAbortedException, TException {
+ try {
+ getTxnHandler().commitTxn(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public LockResponse lock(LockRequest rqst)
+ throws NoSuchTxnException, TxnAbortedException, TException {
+ try {
+ return getTxnHandler().lock(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public LockResponse check_lock(CheckLockRequest rqst)
+ throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, TException {
+ try {
+ return getTxnHandler().checkLock(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public void unlock(UnlockRequest rqst)
+ throws NoSuchLockException, TxnOpenException, TException {
+ try {
+ getTxnHandler().unlock(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public ShowLocksResponse show_locks(ShowLocksRequest rqst) throws TException {
+ try {
+ return getTxnHandler().showLocks(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public void heartbeat(HeartbeatRequest ids)
+ throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, TException {
+ try {
+ getTxnHandler().heartbeat(ids);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public void compact(CompactionRequest rqst) throws TException {
+ try {
+ getTxnHandler().compact(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
+
+ @Override
+ public ShowCompactResponse show_compact(ShowCompactRequest rqst) throws TException {
+ try {
+ return getTxnHandler().showCompact(rqst);
+ } catch (MetaException e) {
+ throw new TException(e);
+ }
+ }
}
public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Wed Mar 5 00:20:53 2014
@@ -26,8 +26,8 @@ import java.lang.reflect.InvocationHandl
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
-import java.net.URI;
-import java.net.URISyntaxException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
@@ -46,11 +46,16 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
@@ -59,6 +64,9 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.Index;
@@ -66,8 +74,14 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
@@ -78,13 +92,20 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.UnlockRequest;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
@@ -98,6 +119,12 @@ import org.apache.thrift.transport.TSock
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable;
+
/**
* Hive Metastore Client.
*/
@@ -665,7 +692,6 @@ public class HiveMetaStoreClient impleme
* @param name
* @param dbname
* @throws NoSuchObjectException
- * @throws ExistingDependentsException
* @throws MetaException
* @throws TException
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
@@ -689,7 +715,6 @@ public class HiveMetaStoreClient impleme
* @param deleteData
* delete the underlying data or just delete the table in metadata
* @throws NoSuchObjectException
- * @throws ExistingDependentsException
* @throws MetaException
* @throws TException
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
@@ -920,7 +945,8 @@ public class HiveMetaStoreClient impleme
List<String> part_vals, String user_name, List<String> group_names)
throws MetaException, UnknownTableException, NoSuchObjectException,
TException {
- return deepCopy(client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names));
+ return deepCopy(client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name,
+ group_names));
}
/**
@@ -1444,6 +1470,167 @@ public class HiveMetaStoreClient impleme
client.cancel_delegation_token(tokenStrForm);
}
+ public static class ValidTxnListImpl implements ValidTxnList {
+
+ private GetOpenTxnsResponse txns;
+
+ public ValidTxnListImpl() {
+ }
+
+ public ValidTxnListImpl(GetOpenTxnsResponse t) {
+ txns = t;
+ }
+
+ @Override
+ public boolean isTxnCommitted(long txnid) {
+ if (txns.getTxn_high_water_mark() < txnid) return false;
+ return !txns.getOpen_txns().contains(txnid);
+ }
+
+ @Override
+ public RangeResponse isTxnRangeCommitted(long minTxnId, long maxTxnId) {
+ if (txns.getTxn_high_water_mark() < minTxnId) return RangeResponse.NONE;
+
+ RangeResponse rc = RangeResponse.ALL;
+ boolean foundCommitted = false;
+ for (long id = minTxnId; id <= maxTxnId; id++) {
+ if (isTxnCommitted(id)) foundCommitted = true;
+ else rc = RangeResponse.SOME;
+ }
+ if (!foundCommitted) rc = RangeResponse.NONE;
+ return rc;
+ }
+
+ @Override
+ public GetOpenTxnsResponse getOpenTxns() {
+ return txns;
+ }
+
+ @Override
+ public String toString() {
+ StringBuffer buf = new StringBuffer();
+ buf.append(getOpenTxns().getTxn_high_water_mark());
+ Set<Long> openTxns = getOpenTxns().getOpen_txns();
+ if (openTxns != null && openTxns.size() > 0) {
+ for (long txn : openTxns) {
+ buf.append(':');
+ buf.append(txn);
+ }
+ } else {
+ buf.append(':');
+ }
+ return buf.toString();
+ }
+
+ @Override
+ public void fromString(String src) {
+ // Make sure we have a non-null value in txns so that any future calls to this don't NPE.
+ txns = new GetOpenTxnsResponse();
+ if (src == null) {
+ txns.setTxn_high_water_mark(Long.MAX_VALUE);
+ txns.setOpen_txns(new HashSet<Long>());
+ return;
+ }
+
+ String[] tString = src.split(":");
+ txns.setTxn_high_water_mark(Long.valueOf(tString[0]));
+ Set<Long> openTxns = new HashSet<Long>();
+ for (int i = 1; i < tString.length; i++) openTxns.add(Long.valueOf(tString[i]));
+ txns.setOpen_txns(openTxns);
+ }
+ }
+
+ @Override
+ public ValidTxnList getValidTxns() throws TException {
+ GetOpenTxnsResponse txns = client.get_open_txns();
+ return new ValidTxnListImpl(txns);
+ }
+
+ @Override
+ public long openTxn(String user) throws TException {
+ OpenTxnsResponse txns = openTxns(user, 1);
+ return txns.getTxn_ids().get(0);
+ }
+
+ @Override
+ public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
+ String hostname = null;
+ try {
+ hostname = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ LOG.error("Unable to resolve my host name " + e.getMessage());
+ throw new RuntimeException(e);
+ }
+ return client.open_txns(new OpenTxnRequest(numTxns, user, hostname));
+ }
+
+ @Override
+ public void rollbackTxn(long txnid) throws NoSuchTxnException, TException {
+ client.abort_txn(new AbortTxnRequest(txnid));
+ }
+
+ @Override
+ public void commitTxn(long txnid)
+ throws NoSuchTxnException, TxnAbortedException, TException {
+ client.commit_txn(new CommitTxnRequest(txnid));
+ }
+
+ @Override
+ public GetOpenTxnsInfoResponse showTxns() throws TException {
+ return client.get_open_txns_info();
+ }
+
+ @Override
+ public LockResponse lock(LockRequest request)
+ throws NoSuchTxnException, TxnAbortedException, TException {
+ return client.lock(request);
+ }
+
+ @Override
+ public LockResponse checkLock(long lockid)
+ throws NoSuchTxnException, TxnAbortedException, NoSuchLockException,
+ TException {
+ return client.check_lock(new CheckLockRequest(lockid));
+ }
+
+ @Override
+ public void unlock(long lockid)
+ throws NoSuchLockException, TxnOpenException, TException {
+ client.unlock(new UnlockRequest(lockid));
+ }
+
+ @Override
+ public ShowLocksResponse showLocks() throws TException {
+ return client.show_locks(new ShowLocksRequest());
+ }
+
+ @Override
+ public void heartbeat(long txnid, long lockid)
+ throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
+ TException {
+ HeartbeatRequest hb = new HeartbeatRequest();
+ hb.setLockid(lockid);
+ hb.setTxnid(txnid);
+ client.heartbeat(hb);
+ }
+
+ @Override
+ public void compact(String dbname, String tableName, String partitionName, CompactionType type)
+ throws TException {
+ CompactionRequest cr = new CompactionRequest();
+ if (dbname == null) cr.setDbname(DEFAULT_DATABASE_NAME);
+ else cr.setDbname(dbname);
+ cr.setTablename(tableName);
+ if (partitionName != null) cr.setPartitionname(partitionName);
+ cr.setType(type);
+ client.compact(cr);
+ }
+
+ @Override
+ public ShowCompactResponse showCompactions() throws TException {
+ return client.show_compact(new ShowCompactRequest());
+ }
+
/**
* Creates a synchronized wrapper for any {@link IMetaStoreClient}.
* This may be used by multi-threaded applications until we have
@@ -1483,7 +1670,8 @@ public class HiveMetaStoreClient impleme
@Override
public void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> partKVs, PartitionEventType eventType)
- throws MetaException, TException, NoSuchObjectException, UnknownDBException, UnknownTableException,
+ throws MetaException, TException, NoSuchObjectException, UnknownDBException,
+ UnknownTableException,
InvalidPartitionException, UnknownPartitionException {
assert db_name != null;
assert tbl_name != null;
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Wed Mar 5 00:20:53 2014
@@ -18,6 +18,20 @@
package org.apache.hadoop.hive.metastore;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+import org.apache.thrift.TException;
+
import java.util.List;
import java.util.Map;
@@ -159,7 +173,6 @@ public interface IMetaStoreClient {
* The table wasn't found.
* @throws TException
* A thrift communication error occurred
- * @throws ExistingDependentsException
*/
public void dropTable(String dbname, String tableName, boolean deleteData,
boolean ignoreUknownTab) throws MetaException, TException,
@@ -361,7 +374,9 @@ public interface IMetaStoreClient {
List<String> partVals) throws NoSuchObjectException, MetaException, TException;
/**
- * @param partition
+ * @param partitionSpecs
+ * @param sourceDb
+ * @param sourceTable
* @param destdb
* @param destTableName
* @return partition object
@@ -1020,6 +1035,262 @@ public interface IMetaStoreClient {
public List<String> getFunctions(String dbName, String pattern)
throws MetaException, TException;
+ // Transaction and locking methods
+ public interface ValidTxnList {
+
+ /**
+ * Key used to store valid txn list in a {@link org.apache.hadoop.conf.Configuration} object.
+ */
+ public static final String VALID_TXNS_KEY = "hive.txn.valid.txns";
+
+ /**
+ * The response to a range query. NONE means no values in this range match,
+ * SOME mean that some do, and ALL means that every value does.
+ */
+ public enum RangeResponse {NONE, SOME, ALL};
+
+ /**
+ * Indicates whether a given transaction has been committed and should be
+ * viewed as valid for read.
+ * @param txnid id for the transaction
+ * @return true if committed, false otherwise
+ */
+ public boolean isTxnCommitted(long txnid);
+
+ /**
+ * Find out if a range of transaction ids have been committed.
+ * @param minTxnId minimum txnid to look for, inclusive
+ * @param maxTxnId maximum txnid to look for, inclusive
+ * @return Indicate whether none, some, or all of these transactions have been committed.
+ */
+ public RangeResponse isTxnRangeCommitted(long minTxnId, long maxTxnId);
+
+ /**
+ * Get at the underlying OpenTxn structure. This is useful if the user
+ * wishes to get a list of all open transactions for more efficient
+ * filtering.
+ * @return open transactions
+ */
+ public GetOpenTxnsResponse getOpenTxns();
+
+ /**
+ * Write this validTxnList into a string. Obviously all implementations will already
+ * implement this, but it is being called out specifically here to make clear that the
+ * implementation needs to override the default implementation. This should produce a string
+ * that can be used by {@link #fromString(String)} to populate a validTxnsList.
+ */
+ public String toString();
+
+ /**
+ * Populate this validTxnList from the string. It is assumed that the string was created via
+ * {@link #toString()}.
+ * @param src source string.
+ */
+ public void fromString(String src);
+ }
+
+ /**
+ * Get a structure that details valid transactions.
+ * @return list of valid transactions
+ * @throws TException
+ */
+ public ValidTxnList getValidTxns() throws TException;
+
+ /**
+ * Initiate a transaction.
+ * @param user User who is opening this transaction. This is the Hive user,
+ * not necessarily the OS user. It is assumed that this user has already been
+ * authenticated and authorized at this point.
+ * @return transaction identifier
+ * @throws TException
+ */
+ public long openTxn(String user) throws TException;
+
+ /**
+ * Initiate a batch of transactions. It is not guaranteed that the
+ * requested number of transactions will be instantiated. The system has a
+ * maximum number instantiated per request, controlled by hive.txn.max
+ * .batch.open in hive-site.xml. If the user requests more than this
+ * value, only the configured max will be returned.
+ *
+ * <p>Increasing the number of transactions requested in the batch will
+ * allow applications that stream data into Hive to place more commits in a
+ * single file, thus reducing load on the namenode and making reads of the
+ * data more efficient. However, opening more transactions in a batch will
+ * also result in readers needing to keep a larger list of open
+ * transactions to ignore, potentially slowing their reads. Users will
+ * need to test in their system to understand the optimal number of
+ * transactions to request in a batch.
+ * </p>
+ * @param user User who is opening this transaction. This is the Hive user,
+ * not necessarily the OS user. It is assumed that this user has already been
+ * authenticated and authorized at this point.
+ * @param numTxns number of requested transactions to open
+ * @return list of opened txn ids. As noted above, this may be less than
+ * requested, so the user should check how many were returned rather than
+ * optimistically assuming that the result matches the request.
+ * @throws TException
+ */
+ public OpenTxnsResponse openTxns(String user, int numTxns) throws TException;
+
+ /**
+ * Rollback a transaction. This will also unlock any locks associated with
+ * this transaction.
+ * @param txnid id of transaction to be rolled back.
+ * @throws NoSuchTxnException if the requested transaction does not exist.
+ * Note that this can result from the transaction having timed out and been
+ * deleted.
+ * @throws TException
+ */
+ public void rollbackTxn(long txnid) throws NoSuchTxnException, TException;
+
+ /**
+ * Commit a transaction. This will also unlock any locks associated with
+ * this transaction.
+ * @param txnid id of transaction to be committed.
+ * @throws NoSuchTxnException if the requested transaction does not exist.
+ * This can result from the transaction having timed out and been deleted by
+ * the compactor.
+ * @throws TxnAbortedException if the requested transaction has been
+ * aborted. This can result from the transaction timing out.
+ * @throws TException
+ */
+ public void commitTxn(long txnid)
+ throws NoSuchTxnException, TxnAbortedException, TException;
+
+ /**
+ * Show the list of currently open transactions. This is for use by "show transactions" in the
+ * grammar, not for applications that want to find a list of current transactions to work with.
+ * Those wishing the latter should call {@link #getValidTxns()}.
+ * @return List of currently opened transactions, including aborted ones.
+ * @throws TException
+ */
+ public GetOpenTxnsInfoResponse showTxns() throws TException;
+
+ /**
+ * Request a set of locks. All locks needed for a particular query, DML,
+ * or DDL operation should be batched together and requested in one lock
+ * call. This avoids deadlocks. It also avoids blocking other users who
+ * only require some of the locks required by this user.
+ *
+ * <p>If the operation requires a transaction (INSERT, UPDATE,
+ * or DELETE) that transaction id must be provided as part this lock
+ * request. All locks associated with a transaction will be released when
+ * that transaction is committed or rolled back.</p>
+ *
+ * <p>Once a lock is acquired, {@link #heartbeat(long, long)} must be called
+ * on a regular basis to avoid the lock being timed out by the system.</p>
+ * @param request The lock request. {@link LockRequestBuilder} can be used
+ * to construct this request.
+ * @return a lock response, which will provide two things,
+ * the id of the lock (to be used in all further calls regarding this lock)
+ * as well as a state of the lock. If the state is ACQUIRED then the user
+ * can proceed. If it is WAITING the user should wait and call
+ * {@link #checkLock(long)} before proceeding. All components of the lock
+ * will have the same state.
+ * @throws NoSuchTxnException if the requested transaction does not exist.
+ * This can result from the transaction having timed out and been deleted by
+ * the compactor.
+ * @throws TxnAbortedException if the requested transaction has been
+ * aborted. This can result from the transaction timing out.
+ * @throws TException
+ */
+ public LockResponse lock(LockRequest request)
+ throws NoSuchTxnException, TxnAbortedException, TException;
+
+ /**
+ * Check the status of a set of locks requested via a
+ * {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)} call.
+ * Once a lock is acquired, {@link #heartbeat(long, long)} must be called
+ * on a regular basis to avoid the lock being timed out by the system.
+ * @param lockid lock id returned by lock().
+ * @return a lock response, which will provide two things,
+ * the id of the lock (to be used in all further calls regarding this lock)
+ * as well as a state of the lock. If the state is ACQUIRED then the user
+ * can proceed. If it is WAITING the user should wait and call
+ * this method again before proceeding. All components of the lock
+ * will have the same state.
+ * @throws NoSuchTxnException if the requested transaction does not exist.
+ * This can result from the transaction having timed out and been deleted by
+ * the compactor.
+ * @throws TxnAbortedException if the requested transaction has been
+ * aborted. This can result from the transaction timing out.
+ * @throws NoSuchLockException if the requested lockid does not exist.
+ * This can result from the lock timing out and being unlocked by the system.
+ * @throws TException
+ */
+ public LockResponse checkLock(long lockid)
+ throws NoSuchTxnException, TxnAbortedException, NoSuchLockException,
+ TException;
+
+ /**
+ * Unlock a set of locks. This can only be called when the locks are not
+ * associated with a transaction.
+ * @param lockid lock id returned by
+ * {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)}
+ * @throws NoSuchLockException if the requested lockid does not exist.
+ * This can result from the lock timing out and being unlocked by the system.
+ * @throws TxnOpenException if the locks are associated with a
+ * transaction.
+ * @throws TException
+ */
+ public void unlock(long lockid)
+ throws NoSuchLockException, TxnOpenException, TException;
+
+ /**
+ * Show all currently held and waiting locks.
+ * @return List of currently held and waiting locks.
+ * @throws TException
+ */
+ public ShowLocksResponse showLocks() throws TException;
+
+ /**
+ * Send a heartbeat to indicate that the client holding these locks (if
+ * any) and that opened this transaction (if one exists) is still alive.
+ * The default timeout for transactions and locks is 300 seconds,
+ * though it is configurable. To determine how often to heartbeat you will
+ * need to ask your system administrator how the metastore thrift service
+ * has been configured.
+ * @param txnid the id of the open transaction. If no transaction is open
+ * (it is a DDL or query) then this can be set to 0.
+ * @param lockid the id of the locks obtained. If no locks have been
+ * obtained then this can be set to 0.
+ * @throws NoSuchTxnException if the requested transaction does not exist.
+ * This can result from the transaction having timed out and been deleted by
+ * the compactor.
+ * @throws TxnAbortedException if the requested transaction has been
+ * aborted. This can result from the transaction timing out.
+ * @throws NoSuchLockException if the requested lockid does not exist.
+ * This can result from the lock timing out and being unlocked by the system.
+ * @throws TException
+ */
+ public void heartbeat(long txnid, long lockid)
+ throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
+ TException;
+
+ /**
+ * Send a request to compact a table or partition. This will not block until the compaction is
+ * complete. It will instead put a request on the queue for that table or partition to be
+ * compacted. No checking is done on the dbname, tableName, or partitionName to make sure they
+ * refer to valid objects. It is assumed this has already been done by the caller.
+ * @param dbname Name of the database the table is in. If null, this will be assumed to be
+ * 'default'.
+ * @param tableName Name of the table to be compacted. This cannot be null. If partitionName
+ * is null, this must be a non-partitioned table.
+ * @param partitionName Name of the partition to be compacted
+ * @param type Whether this is a major or minor compaction.
+ * @throws TException
+ */
+ public void compact(String dbname, String tableName, String partitionName, CompactionType type)
+ throws TException;
+
+ /**
+ * Get a list of all current compactions.
+ * @return List of all current compactions. This includes compactions waiting to happen,
+ * in progress, and finished but waiting to clean the existing files.
+ * @throws TException
+ */
+ public ShowCompactResponse showCompactions() throws TException;
public class IncompatibleMetastoreException extends MetaException {
public IncompatibleMetastoreException(String message) {
Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java?rev=1574266&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java Wed Mar 5 00:20:53 2014
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockLevel;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+/**
+ * A builder for {@link LockComponent}s
+ */
+public class LockComponentBuilder {
+ private LockComponent component;
+ private boolean tableNameSet;
+ private boolean partNameSet;
+
+ public LockComponentBuilder() {
+ component = new LockComponent();
+ tableNameSet = partNameSet = false;
+ }
+
+ /**
+ * Set the lock to be exclusive.
+ * @return reference to this builder
+ */
+ public LockComponentBuilder setExclusive() {
+ component.setType(LockType.EXCLUSIVE);
+ return this;
+ }
+
+ /**
+ * Set the lock to be semi-shared.
+ * @return reference to this builder
+ */
+ public LockComponentBuilder setSemiShared() {
+ component.setType(LockType.SHARED_WRITE);
+ return this;
+ }
+
+ /**
+ * Set the lock to be shared.
+ * @return reference to this builder
+ */
+ public LockComponentBuilder setShared() {
+ component.setType(LockType.SHARED_READ);
+ return this;
+ }
+
+ /**
+ * Set the database name.
+ * @param dbName database name
+ * @return reference to this builder
+ */
+ public LockComponentBuilder setDbName(String dbName) {
+ component.setDbname(dbName);
+ return this;
+ }
+
+ /**
+ * Set the table name.
+ * @param tableName table name
+ * @return reference to this builder
+ */
+ public LockComponentBuilder setTableName(String tableName) {
+ component.setTablename(tableName);
+ tableNameSet = true;
+ return this;
+ }
+
+ /**
+ * Set the partition name.
+ * @param partitionName partition name
+ * @return reference to this builder
+ */
+ public LockComponentBuilder setPartitionName(String partitionName) {
+ component.setPartitionname(partitionName);
+ partNameSet = true;
+ return this;
+ }
+
+ /**
+ * Get the constructed lock component.
+ * @return lock component.
+ */
+ public LockComponent build() {
+ LockLevel level = LockLevel.DB;
+ if (tableNameSet) level = LockLevel.TABLE;
+ if (partNameSet) level = LockLevel.PARTITION;
+ component.setLevel(level);
+ return component;
+ }
+}
Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java?rev=1574266&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java Wed Mar 5 00:20:53 2014
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Builder class to make constructing {@link LockRequest} easier.
+ */
+public class LockRequestBuilder {
+
+ private LockRequest req;
+ private LockTrie trie;
+ private boolean userSet;
+
+ public LockRequestBuilder() {
+ req = new LockRequest();
+ trie = new LockTrie();
+ userSet = false;
+ }
+
+ /**
+ * Get the constructed LockRequest.
+ * @return lock request
+ */
+ public LockRequest build() {
+ if (!userSet) {
+ throw new RuntimeException("Cannot build a lock without giving a user");
+ }
+ trie.addLocksToRequest(req);
+ try {
+ req.setHostname(InetAddress.getLocalHost().getHostName());
+ } catch (UnknownHostException e) {
+ throw new RuntimeException("Unable to determine our local host!");
+ }
+ return req;
+ }
+
+ /**
+ * Set the transaction id.
+ * @param txnid transaction id
+ * @return reference to this builder
+ */
+ public LockRequestBuilder setTransactionId(long txnid) {
+ req.setTxnid(txnid);
+ return this;
+ }
+
+ public LockRequestBuilder setUser(String user) {
+ if (user == null) user = "unknown";
+ req.setUser(user);
+ userSet = true;
+ return this;
+ }
+
+ /**
+ * Add a lock component to the lock request
+ * @param component to add
+ * @return reference to this builder
+ */
+ public LockRequestBuilder addLockComponent(LockComponent component) {
+ trie.add(component);
+ return this;
+ }
+
+ // For reasons that are completely incomprehensible to me the semantic
+ // analyzers often ask for multiple locks on the same entity (for example
+ // a shared_read and an exclusive lock). The db locking system gets confused
+ // by this and dead locks on it. To resolve that, we'll make sure in the
+ // request that multiple locks are coalesced and promoted to the higher
+ // level of locking. To do this we put all locks components in trie based
+ // on dbname, tablename, partition name and handle the promotion as new
+ // requests come in. This structure depends on the fact that null is a
+ // valid key in a HashMap. So a database lock will map to (dbname, null,
+ // null).
+ private static class LockTrie {
+ Map<String, TableTrie> trie;
+
+ LockTrie() {
+ trie = new HashMap<String, TableTrie>();
+ }
+
+ public void add(LockComponent comp) {
+ TableTrie tabs = trie.get(comp.getDbname());
+ if (tabs == null) {
+ tabs = new TableTrie();
+ trie.put(comp.getDbname(), tabs);
+ }
+ setTable(comp, tabs);
+ }
+
+ public void addLocksToRequest(LockRequest request) {
+ for (TableTrie tab : trie.values()) {
+ for (PartTrie part : tab.values()) {
+ for (LockComponent lock : part.values()) {
+ request.addToComponent(lock);
+ }
+ }
+ }
+ }
+
+ private void setTable(LockComponent comp, TableTrie tabs) {
+ PartTrie parts = tabs.get(comp.getTablename());
+ if (parts == null) {
+ parts = new PartTrie();
+ tabs.put(comp.getTablename(), parts);
+ }
+ setPart(comp, parts);
+ }
+
+ private void setPart(LockComponent comp, PartTrie parts) {
+ LockComponent existing = parts.get(comp.getPartitionname());
+ if (existing == null) {
+ // No existing lock for this partition.
+ parts.put(comp.getPartitionname(), comp);
+ } else if (existing.getType() != LockType.EXCLUSIVE &&
+ (comp.getType() == LockType.EXCLUSIVE ||
+ comp.getType() == LockType.SHARED_WRITE)) {
+ // We only need to promote if comp.type is > existing.type. For
+ // efficiency we check if existing is exclusive (in which case we
+ // need never promote) or if comp is exclusive or shared_write (in
+ // which case we can promote even though they may both be shared
+ // write). If comp is shared_read there's never a need to promote.
+ parts.put(comp.getPartitionname(), comp);
+ }
+ }
+
+ private static class TableTrie extends HashMap<String, PartTrie> {
+ }
+
+ private static class PartTrie extends HashMap<String, LockComponent> {
+ }
+
+
+
+ }
+}
Added: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java?rev=1574266&view=auto
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java (added)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java Wed Mar 5 00:20:53 2014
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn;
+
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+
+/**
+ * Information on a possible or running compaction.
+ */
+public class CompactionInfo {
+ public long id;
+ public String dbname;
+ public String tableName;
+ public String partName;
+ public CompactionType type;
+ public String runAs;
+ public boolean tooManyAborts = false;
+
+ private String fullPartitionName = null;
+ private String fullTableName = null;
+
+ public String getFullPartitionName() {
+ if (fullPartitionName == null) {
+ StringBuffer buf = new StringBuffer(dbname);
+ buf.append('.');
+ buf.append(tableName);
+ if (partName != null) {
+ buf.append('.');
+ buf.append(partName);
+ }
+ fullPartitionName = buf.toString();
+ }
+ return fullPartitionName;
+ }
+
+ public String getFullTableName() {
+ if (fullTableName == null) {
+ StringBuffer buf = new StringBuffer(dbname);
+ buf.append('.');
+ buf.append(tableName);
+ fullTableName = buf.toString();
+ }
+ return fullTableName;
+ }
+}