Posted to commits@hive.apache.org by ha...@apache.org on 2014/02/12 22:32:35 UTC
svn commit: r1567761 [8/8] - in /hive/trunk: metastore/if/
metastore/src/gen/thrift/gen-cpp/
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/
metastore/src/gen/thrift/gen-php/metastore/
metastore/src/gen/thrift/gen-py/hive_me...
Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py Wed Feb 12 21:32:34 2014
@@ -4036,6 +4036,390 @@ class AddPartitionsRequest:
def __ne__(self, other):
return not (self == other)
+class DropPartitionsResult:
+ """
+ Attributes:
+ - partitions
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
+ )
+
+ def __init__(self, partitions=None,):
+ self.partitions = partitions
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.LIST:
+ self.partitions = []
+ (_etype295, _size292) = iprot.readListBegin()
+ for _i296 in xrange(_size292):
+ _elem297 = Partition()
+ _elem297.read(iprot)
+ self.partitions.append(_elem297)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('DropPartitionsResult')
+ if self.partitions is not None:
+ oprot.writeFieldBegin('partitions', TType.LIST, 1)
+ oprot.writeListBegin(TType.STRUCT, len(self.partitions))
+ for iter298 in self.partitions:
+ iter298.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class DropPartitionsExpr:
+ """
+ Attributes:
+ - expr
+ - partArchiveLevel
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'expr', None, None, ), # 1
+ (2, TType.I32, 'partArchiveLevel', None, None, ), # 2
+ )
+
+ def __init__(self, expr=None, partArchiveLevel=None,):
+ self.expr = expr
+ self.partArchiveLevel = partArchiveLevel
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.expr = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.partArchiveLevel = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('DropPartitionsExpr')
+ if self.expr is not None:
+ oprot.writeFieldBegin('expr', TType.STRING, 1)
+ oprot.writeString(self.expr)
+ oprot.writeFieldEnd()
+ if self.partArchiveLevel is not None:
+ oprot.writeFieldBegin('partArchiveLevel', TType.I32, 2)
+ oprot.writeI32(self.partArchiveLevel)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.expr is None:
+ raise TProtocol.TProtocolException(message='Required field expr is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class RequestPartsSpec:
+ """
+ Attributes:
+ - names
+ - exprs
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.LIST, 'names', (TType.STRING,None), None, ), # 1
+ (2, TType.LIST, 'exprs', (TType.STRUCT,(DropPartitionsExpr, DropPartitionsExpr.thrift_spec)), None, ), # 2
+ )
+
+ def __init__(self, names=None, exprs=None,):
+ self.names = names
+ self.exprs = exprs
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.LIST:
+ self.names = []
+ (_etype302, _size299) = iprot.readListBegin()
+ for _i303 in xrange(_size299):
+ _elem304 = iprot.readString();
+ self.names.append(_elem304)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.LIST:
+ self.exprs = []
+ (_etype308, _size305) = iprot.readListBegin()
+ for _i309 in xrange(_size305):
+ _elem310 = DropPartitionsExpr()
+ _elem310.read(iprot)
+ self.exprs.append(_elem310)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('RequestPartsSpec')
+ if self.names is not None:
+ oprot.writeFieldBegin('names', TType.LIST, 1)
+ oprot.writeListBegin(TType.STRING, len(self.names))
+ for iter311 in self.names:
+ oprot.writeString(iter311)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.exprs is not None:
+ oprot.writeFieldBegin('exprs', TType.LIST, 2)
+ oprot.writeListBegin(TType.STRUCT, len(self.exprs))
+ for iter312 in self.exprs:
+ iter312.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class DropPartitionsRequest:
+ """
+ Attributes:
+ - dbName
+ - tblName
+ - parts
+ - deleteData
+ - ifExists
+ - ignoreProtection
+ - environmentContext
+ - needResult
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'dbName', None, None, ), # 1
+ (2, TType.STRING, 'tblName', None, None, ), # 2
+ (3, TType.STRUCT, 'parts', (RequestPartsSpec, RequestPartsSpec.thrift_spec), None, ), # 3
+ (4, TType.BOOL, 'deleteData', None, None, ), # 4
+ (5, TType.BOOL, 'ifExists', None, True, ), # 5
+ (6, TType.BOOL, 'ignoreProtection', None, None, ), # 6
+ (7, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 7
+ (8, TType.BOOL, 'needResult', None, True, ), # 8
+ )
+
+ def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExists=thrift_spec[5][4], ignoreProtection=None, environmentContext=None, needResult=thrift_spec[8][4],):
+ self.dbName = dbName
+ self.tblName = tblName
+ self.parts = parts
+ self.deleteData = deleteData
+ self.ifExists = ifExists
+ self.ignoreProtection = ignoreProtection
+ self.environmentContext = environmentContext
+ self.needResult = needResult
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.dbName = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tblName = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.parts = RequestPartsSpec()
+ self.parts.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.BOOL:
+ self.deleteData = iprot.readBool();
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.BOOL:
+ self.ifExists = iprot.readBool();
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.BOOL:
+ self.ignoreProtection = iprot.readBool();
+ else:
+ iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.STRUCT:
+ self.environmentContext = EnvironmentContext()
+ self.environmentContext.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.BOOL:
+ self.needResult = iprot.readBool();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('DropPartitionsRequest')
+ if self.dbName is not None:
+ oprot.writeFieldBegin('dbName', TType.STRING, 1)
+ oprot.writeString(self.dbName)
+ oprot.writeFieldEnd()
+ if self.tblName is not None:
+ oprot.writeFieldBegin('tblName', TType.STRING, 2)
+ oprot.writeString(self.tblName)
+ oprot.writeFieldEnd()
+ if self.parts is not None:
+ oprot.writeFieldBegin('parts', TType.STRUCT, 3)
+ self.parts.write(oprot)
+ oprot.writeFieldEnd()
+ if self.deleteData is not None:
+ oprot.writeFieldBegin('deleteData', TType.BOOL, 4)
+ oprot.writeBool(self.deleteData)
+ oprot.writeFieldEnd()
+ if self.ifExists is not None:
+ oprot.writeFieldBegin('ifExists', TType.BOOL, 5)
+ oprot.writeBool(self.ifExists)
+ oprot.writeFieldEnd()
+ if self.ignoreProtection is not None:
+ oprot.writeFieldBegin('ignoreProtection', TType.BOOL, 6)
+ oprot.writeBool(self.ignoreProtection)
+ oprot.writeFieldEnd()
+ if self.environmentContext is not None:
+ oprot.writeFieldBegin('environmentContext', TType.STRUCT, 7)
+ self.environmentContext.write(oprot)
+ oprot.writeFieldEnd()
+ if self.needResult is not None:
+ oprot.writeFieldBegin('needResult', TType.BOOL, 8)
+ oprot.writeBool(self.needResult)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.dbName is None:
+ raise TProtocol.TProtocolException(message='Required field dbName is unset!')
+ if self.tblName is None:
+ raise TProtocol.TProtocolException(message='Required field tblName is unset!')
+ if self.parts is None:
+ raise TProtocol.TProtocolException(message='Required field parts is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class MetaException(TException):
"""
Attributes:
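The structs above also have generated Java counterparts under org.apache.hadoop.hive.metastore.api (imported by HiveMetaStoreClient further down in this commit). A minimal sketch of assembling a by-name drop request with that generated API, assuming a made-up table and partition name:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;

    public class DropRequestSketch {
      public static DropPartitionsRequest byNames() {
        // RequestPartsSpec is a Thrift union; the Java codegen exposes one
        // static factory per field, mirroring the Ruby union below.
        RequestPartsSpec parts = RequestPartsSpec.names(Arrays.asList("ds=2014-02-12"));
        // dbName, tblName and parts are the required fields (see validate()).
        DropPartitionsRequest req = new DropPartitionsRequest("default", "my_table", parts);
        req.setDeleteData(true);   // also delete the partition directories
        req.setNeedResult(false);  // skip returning dropped Partition objects
        return req;                // ifExists already defaults to true
      }
    }
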
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb Wed Feb 12 21:32:34 2014
@@ -910,6 +910,103 @@ class AddPartitionsRequest
::Thrift::Struct.generate_accessors self
end
+class DropPartitionsResult
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ PARTITIONS = 1
+
+ FIELDS = {
+ PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class DropPartitionsExpr
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ EXPR = 1
+ PARTARCHIVELEVEL = 2
+
+ FIELDS = {
+ EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true},
+ PARTARCHIVELEVEL => {:type => ::Thrift::Types::I32, :name => 'partArchiveLevel', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field expr is unset!') unless @expr
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class RequestPartsSpec < ::Thrift::Union
+ include ::Thrift::Struct_Union
+ class << self
+ def names(val)
+ RequestPartsSpec.new(:names, val)
+ end
+
+ def exprs(val)
+ RequestPartsSpec.new(:exprs, val)
+ end
+ end
+
+ NAMES = 1
+ EXPRS = 2
+
+ FIELDS = {
+ NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}},
+ EXPRS => {:type => ::Thrift::Types::LIST, :name => 'exprs', :element => {:type => ::Thrift::Types::STRUCT, :class => ::DropPartitionsExpr}}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise(StandardError, 'Union fields are not set.') if get_set_field.nil? || get_value.nil?
+ end
+
+ ::Thrift::Union.generate_accessors self
+end
+
+class DropPartitionsRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TBLNAME = 2
+ PARTS = 3
+ DELETEDATA = 4
+ IFEXISTS = 5
+ IGNOREPROTECTION = 6
+ ENVIRONMENTCONTEXT = 7
+ NEEDRESULT = 8
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+ TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+ PARTS => {:type => ::Thrift::Types::STRUCT, :name => 'parts', :class => ::RequestPartsSpec},
+ DELETEDATA => {:type => ::Thrift::Types::BOOL, :name => 'deleteData', :optional => true},
+ IFEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifExists', :default => true, :optional => true},
+ IGNOREPROTECTION => {:type => ::Thrift::Types::BOOL, :name => 'ignoreProtection', :optional => true},
+ ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true},
+ NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field parts is unset!') unless @parts
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
class MetaException < ::Thrift::Exception
include ::Thrift::Struct, ::Thrift::Struct_Union
def initialize(message=nil)
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Wed Feb 12 21:32:34 2014
@@ -612,6 +612,23 @@ module ThriftHiveMetastore
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition_by_name_with_environment_context failed: unknown result')
end
+ def drop_partitions_req(req)
+ send_drop_partitions_req(req)
+ return recv_drop_partitions_req()
+ end
+
+ def send_drop_partitions_req(req)
+ send_message('drop_partitions_req', Drop_partitions_req_args, :req => req)
+ end
+
+ def recv_drop_partitions_req()
+ result = receive_message(Drop_partitions_req_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partitions_req failed: unknown result')
+ end
+
def get_partition(db_name, tbl_name, part_vals)
send_get_partition(db_name, tbl_name, part_vals)
return recv_get_partition()
@@ -1963,6 +1980,19 @@ module ThriftHiveMetastore
write_result(result, oprot, 'drop_partition_by_name_with_environment_context', seqid)
end
+ def process_drop_partitions_req(seqid, iprot, oprot)
+ args = read_args(iprot, Drop_partitions_req_args)
+ result = Drop_partitions_req_result.new()
+ begin
+ result.success = @handler.drop_partitions_req(args.req)
+ rescue ::NoSuchObjectException => o1
+ result.o1 = o1
+ rescue ::MetaException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'drop_partitions_req', seqid)
+ end
+
def process_get_partition(seqid, iprot, oprot)
args = read_args(iprot, Get_partition_args)
result = Get_partition_result.new()
@@ -3997,6 +4027,42 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Drop_partitions_req_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQ = 1
+
+ FIELDS = {
+ REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::DropPartitionsRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Drop_partitions_req_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::DropPartitionsResult},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Get_partition_args
include ::Thrift::Struct, ::Thrift::Struct_Union
DB_NAME = 1
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Wed Feb 12 21:32:34 2014
@@ -50,6 +50,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
@@ -66,6 +67,9 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsResult;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -89,6 +93,7 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.Table;
@@ -1398,6 +1403,7 @@ public class HiveMetaStore extends Thrif
tableDnsPath = wh.getDnsPath(tablePath);
}
List<Path> partPaths = new ArrayList<Path>();
+ Table tbl = ms.getTable(dbName, tableName);
// call dropPartition on each of the table's partitions to follow the
// procedure for cleanly dropping partitions.
@@ -1406,6 +1412,7 @@ public class HiveMetaStore extends Thrif
if (partsToDelete == null || partsToDelete.isEmpty()) {
break;
}
+ List<String> partNames = new ArrayList<String>();
for (Partition part : partsToDelete) {
if (checkLocation && part.getSd() != null &&
part.getSd().getLocation() != null) {
@@ -1422,8 +1429,9 @@ public class HiveMetaStore extends Thrif
partPaths.add(partPath);
}
}
- ms.dropPartition(dbName, tableName, part.getValues());
+ partNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
}
+ ms.dropPartitions(dbName, tableName, partNames);
}
return partPaths;
@@ -2150,11 +2158,7 @@ public class HiveMetaStore extends Thrif
isArchived = MetaStoreUtils.isArchived(part);
if (isArchived) {
archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
- if (!wh.isWritable(archiveParentDir.getParent())) {
- throw new MetaException("Table partition not deleted since " +
- archiveParentDir.getParent() + " is not writable by " +
- hiveConf.getUser());
- }
+ verifyIsWritablePath(archiveParentDir);
}
if (!ms.dropPartition(db_name, tbl_name, part_vals)) {
throw new MetaException("Unable to drop partition");
@@ -2162,11 +2166,7 @@ public class HiveMetaStore extends Thrif
success = ms.commitTransaction();
if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
partPath = new Path(part.getSd().getLocation());
- if (!wh.isWritable(partPath.getParent())) {
- throw new MetaException("Table partition not deleted since " +
- partPath.getParent() + " is not writable by " +
- hiveConf.getUser());
- }
+ verifyIsWritablePath(partPath);
}
} finally {
if (!success) {
@@ -2211,6 +2211,162 @@ public class HiveMetaStore extends Thrif
null);
}
+ private static class PathAndPartValSize {
+ public PathAndPartValSize(Path path, int partValSize) {
+ this.path = path;
+ this.partValSize = partValSize;
+ }
+ public Path path;
+ public int partValSize;
+ }
+
+ @Override
+ public DropPartitionsResult drop_partitions_req(
+ DropPartitionsRequest request) throws MetaException, NoSuchObjectException, TException {
+ RawStore ms = getMS();
+ String dbName = request.getDbName(), tblName = request.getTblName();
+ boolean ifExists = request.isSetIfExists() && request.isIfExists();
+ boolean deleteData = request.isSetDeleteData() && request.isDeleteData();
+ boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection();
+ boolean needResult = !request.isSetNeedResult() || request.isNeedResult();
+ List<PathAndPartValSize> dirsToDelete = new ArrayList<PathAndPartValSize>();
+ List<Path> archToDelete = new ArrayList<Path>();
+ EnvironmentContext envContext = request.isSetEnvironmentContext()
+ ? request.getEnvironmentContext() : null;
+
+ boolean success = false;
+ ms.openTransaction();
+ Table tbl = null;
+ List<Partition> parts = null;
+ try {
+ // We need Partition-s for firing events and for the result; DN needs MPartition-s to drop.
+ // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
+ tbl = get_table(dbName, tblName);
+ int minCount = 0;
+ RequestPartsSpec spec = request.getParts();
+ List<String> partNames = null;
+ if (spec.isSetExprs()) {
+ // Dropping by expressions.
+ parts = new ArrayList<Partition>(spec.getExprs().size());
+ for (DropPartitionsExpr expr : spec.getExprs()) {
+ ++minCount; // At least one partition per expression, if not ifExists
+ List<Partition> result = new ArrayList<Partition>();
+ boolean hasUnknown = ms.getPartitionsByExpr(
+ dbName, tblName, expr.getExpr(), null, (short)-1, result);
+ if (hasUnknown) {
+ // Expr is built by DDLSA; it should only contain part cols and simple ops
+ throw new MetaException("Unexpected unknown partitions to drop");
+ }
+ // This is to prevent dropping an archived partition that was archived at a
+ // different level than the one the drop command specified.
+ if (!ignoreProtection && expr.isSetPartArchiveLevel()) {
+ for (Partition part : result) {
+ if (MetaStoreUtils.isArchived(part)
+ && MetaStoreUtils.getArchivingLevel(part) < expr.getPartArchiveLevel()) {
+ throw new MetaException("Cannot drop a subset of partitions "
+ + " in an archive, partition " + part);
+ }
+ }
+ }
+ parts.addAll(result);
+ }
+ } else if (spec.isSetNames()) {
+ partNames = spec.getNames();
+ minCount = partNames.size();
+ parts = ms.getPartitionsByNames(dbName, tblName, partNames);
+ } else {
+ throw new MetaException("Partition spec is not set");
+ }
+
+ if ((parts.size() < minCount) && !ifExists) {
+ throw new NoSuchObjectException("Some partitions to drop are missing");
+ }
+
+ List<String> colNames = null;
+ if (partNames == null) {
+ partNames = new ArrayList<String>(parts.size());
+ colNames = new ArrayList<String>(tbl.getPartitionKeys().size());
+ for (FieldSchema col : tbl.getPartitionKeys()) {
+ colNames.add(col.getName());
+ }
+ }
+
+ for (Partition part : parts) {
+ if (!ignoreProtection && !MetaStoreUtils.canDropPartition(tbl, part)) {
+ throw new MetaException("Table " + tbl.getTableName()
+ + " Partition " + part + " is protected from being dropped");
+ }
+
+ firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
+ if (colNames != null) {
+ partNames.add(FileUtils.makePartName(colNames, part.getValues()));
+ }
+ // Preserve the old behavior of failing when we cannot write, even w/o deleteData,
+ // and even if the table is external. That might not make any sense.
+ if (MetaStoreUtils.isArchived(part)) {
+ Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
+ verifyIsWritablePath(archiveParentDir);
+ archToDelete.add(archiveParentDir);
+ }
+ if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
+ Path partPath = new Path(part.getSd().getLocation());
+ verifyIsWritablePath(partPath);
+ dirsToDelete.add(new PathAndPartValSize(partPath, part.getValues().size()));
+ }
+ }
+
+ ms.dropPartitions(dbName, tblName, partNames);
+ success = ms.commitTransaction();
+ DropPartitionsResult result = new DropPartitionsResult();
+ if (needResult) {
+ result.setPartitions(parts);
+ }
+ return result;
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (deleteData && !isExternal(tbl)) {
+ // Archived partitions have har:/to_har_file as their location.
+ // The original directory was saved in params
+ for (Path path : archToDelete) {
+ wh.deleteDir(path, true);
+ }
+ for (PathAndPartValSize p : dirsToDelete) {
+ wh.deleteDir(p.path, true);
+ try {
+ deleteParentRecursive(p.path.getParent(), p.partValSize - 1);
+ } catch (IOException ex) {
+ LOG.warn("Error from deleteParentRecursive", ex);
+ throw new MetaException("Failed to delete parent: " + ex.getMessage());
+ }
+ }
+ }
+ if (parts != null) {
+ for (Partition part : parts) {
+ for (MetaStoreEventListener listener : listeners) {
+ DropPartitionEvent dropPartitionEvent =
+ new DropPartitionEvent(tbl, part, success, deleteData, this);
+ dropPartitionEvent.setEnvironmentContext(envContext);
+ listener.onDropPartition(dropPartitionEvent);
+ }
+ }
+ }
+ }
+ }
+
+ private void verifyIsWritablePath(Path dir) throws MetaException {
+ try {
+ if (!wh.isWritable(dir.getParent())) {
+ throw new MetaException("Table partition not deleted since " + dir.getParent()
+ + " is not writable by " + hiveConf.getUser());
+ }
+ } catch (IOException ex) {
+ LOG.warn("Error from isWritable", ex);
+ throw new MetaException("Table partition not deleted since " + dir.getParent()
+ + " access cannot be checked: " + ex.getMessage());
+ }
+ }
+
@Override
public boolean drop_partition_with_environment_context(final String db_name,
final String tbl_name, final List<String> part_vals, final boolean deleteData,
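The handler above follows the metastore's usual two-phase shape: all metadata changes run inside one RawStore transaction, and filesystem deletes happen only after a successful commit, so a failed commit leaves the data untouched. A stripped-down sketch of that pattern (method fragment only; RawStore, Warehouse and Path as used in the hunk above):

    // Commit metadata first; delete data directories only after success.
    static void dropWithCleanup(RawStore store, Warehouse wh, String dbName,
        String tblName, List<String> partNames, List<Path> dirsToDelete,
        boolean deleteData) throws MetaException, NoSuchObjectException {
      boolean success = false;
      store.openTransaction();
      try {
        store.dropPartitions(dbName, tblName, partNames); // metadata only
        success = store.commitTransaction();
      } finally {
        if (!success) {
          store.rollbackTransaction();       // nothing on disk was touched yet
        } else if (deleteData) {
          for (Path dir : dirsToDelete) {
            wh.deleteDir(dir, true);         // best-effort FS cleanup post-commit
          }
        }
      }
    }
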
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Wed Feb 12 21:32:34 2014
@@ -43,6 +43,7 @@ import javax.security.auth.login.LoginEx
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
@@ -52,6 +53,9 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsResult;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -71,6 +75,7 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
@@ -634,6 +639,27 @@ public class HiveMetaStoreClient impleme
envContext);
}
+ @Override
+ public List<Partition> dropPartitions(String dbName, String tblName,
+ List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+ boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+ RequestPartsSpec rps = new RequestPartsSpec();
+ List<DropPartitionsExpr> exprs = new ArrayList<DropPartitionsExpr>(partExprs.size());
+ for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+ DropPartitionsExpr dpe = new DropPartitionsExpr();
+ dpe.setExpr(partExpr.getSecond());
+ dpe.setPartArchiveLevel(partExpr.getFirst());
+ exprs.add(dpe);
+ }
+ rps.setExprs(exprs);
+ DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+ req.setDeleteData(deleteData);
+ req.setIgnoreProtection(ignoreProtection);
+ req.setNeedResult(true);
+ req.setIfExists(ifExists);
+ return client.drop_partitions_req(req).getPartitions();
+ }
+
/**
* @param name
* @param dbname
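On the caller side the new method is a single Thrift round trip. A hypothetical invocation, where exprBytes stands in for the serialized partition predicate that the planner builds (both exprBytes and the archive level are placeholders, not values from this commit):

    // client is an IMetaStoreClient; exprBytes is a placeholder for the
    // serialized partition filter, 1 is an illustrative archive level.
    List<ObjectPair<Integer, byte[]>> partExprs = Arrays.asList(
        new ObjectPair<Integer, byte[]>(1, exprBytes));
    List<Partition> dropped = client.dropPartitions("default", "my_table",
        partExprs, true /* deleteData */, false /* ignoreProtection */,
        true /* ifExists */);
    for (Partition p : dropped) {
      System.out.println("Dropped partition " + p.getValues());
    }
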
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Wed Feb 12 21:32:34 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.metastore
import java.util.List;
import java.util.Map;
+import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -587,6 +588,10 @@ public interface IMetaStoreClient {
List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
MetaException, TException;
+ List<Partition> dropPartitions(String dbName, String tblName,
+ List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+ boolean ifExists) throws NoSuchObjectException, MetaException, TException;
+
public boolean dropPartition(String db_name, String tbl_name,
String name, boolean deleteData) throws NoSuchObjectException,
MetaException, TException;
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Wed Feb 12 21:32:34 2014
@@ -33,6 +33,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicLong;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Wed Feb 12 21:32:34 2014
@@ -1424,4 +1424,43 @@ public class MetaStoreUtils {
return null;
}
+ public static ProtectMode getProtectMode(Partition partition) {
+ return getProtectMode(partition.getParameters());
+ }
+
+ public static ProtectMode getProtectMode(Table table) {
+ return getProtectMode(table.getParameters());
+ }
+
+ private static ProtectMode getProtectMode(Map<String, String> parameters) {
+ if (parameters == null) {
+ return null;
+ }
+
+ if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
+ return new ProtectMode();
+ } else {
+ return ProtectMode.getProtectModeFromString(parameters.get(ProtectMode.PARAMETER_NAME));
+ }
+ }
+
+ public static boolean canDropPartition(Table table, Partition partition) {
+ ProtectMode mode = getProtectMode(partition);
+ ProtectMode parentMode = getProtectMode(table);
+ return (!mode.noDrop && !mode.offline && !mode.readOnly && !parentMode.noDropCascade);
+ }
+
+ public static String ARCHIVING_LEVEL = "archiving_level";
+ public static int getArchivingLevel(Partition part) throws MetaException {
+ if (!isArchived(part)) {
+ throw new MetaException("Getting level of unarchived partition");
+ }
+
+ String lv = part.getParameters().get(ARCHIVING_LEVEL);
+ if (lv != null) {
+ return Integer.parseInt(lv);
+ } else { // partitions archived before introducing multiple archiving
+ return part.getValues().size();
+ }
+ }
}
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Wed Feb 12 21:32:34 2014
@@ -57,6 +57,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -1322,7 +1323,7 @@ public class ObjectStore implements RawS
return null;
}
// Change the query to use part_vals instead of the name which is
- // redundant
+ // redundant TODO: callers of this often get part_vals out of name for no reason...
String name = Warehouse.makePartName(convertToFieldSchemas(mtbl
.getPartitionKeys()), part_vals);
Query query = pm.newQuery(MPartition.class,
@@ -1421,6 +1422,33 @@ public class ObjectStore implements RawS
return success;
}
+ @Override
+ public void dropPartitions(String dbName, String tblName, List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ if (partNames.isEmpty()) return;
+ boolean success = false;
+ openTransaction();
+ try {
+ // Delete all things.
+ dropPartitionGrantsNoTxn(dbName, tblName, partNames);
+ dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames);
+ dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames);
+
+ // CDs are reused; go through partition SDs, detach all CDs from SDs, then remove unused CDs.
+ for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(dbName, tblName, partNames)) {
+ removeUnusedColumnDescriptor(mcd);
+ }
+ dropPartitionsNoTxn(dbName, tblName, partNames);
+ if (!(success = commitTransaction())) {
+ throw new MetaException("Failed to drop partitions"); // Should not happen?
+ }
+ } finally {
+ if (!success) {
+ rollbackTransaction();
+ }
+ }
+ }
+
/**
* Drop an MPartition and cascade deletes (e.g., delete partition privilege grants,
* drop the storage descriptor cleanly, etc.)
@@ -1447,7 +1475,7 @@ public class ObjectStore implements RawS
List<MPartitionPrivilege> partGrants = listPartitionGrants(
part.getTable().getDatabase().getName(),
part.getTable().getTableName(),
- partName);
+ Lists.newArrayList(partName));
if (partGrants != null && partGrants.size() > 0) {
pm.deletePersistentAll(partGrants);
@@ -1456,7 +1484,7 @@ public class ObjectStore implements RawS
List<MPartitionColumnPrivilege> partColumnGrants = listPartitionAllColumnGrants(
part.getTable().getDatabase().getName(),
part.getTable().getTableName(),
- partName);
+ Lists.newArrayList(partName));
if (partColumnGrants != null && partColumnGrants.size() > 0) {
pm.deletePersistentAll(partColumnGrants);
}
@@ -1981,6 +2009,10 @@ public class ObjectStore implements RawS
return results;
}
+ private static class Out<T> {
+ public T val;
+ }
+
/**
* Gets partition names from the table via ORM (JDOQL) name filter.
* @param dbName Database name.
@@ -1993,6 +2025,70 @@ public class ObjectStore implements RawS
if (partNames.isEmpty()) {
return new ArrayList<Partition>();
}
+ Out<Query> query = new Out<Query>();
+ List<MPartition> mparts = null;
+ try {
+ mparts = getMPartitionsViaOrmFilter(dbName, tblName, partNames, query);
+ return convertToParts(dbName, tblName, mparts);
+ } finally {
+ if (query.val != null) {
+ query.val.closeAll();
+ }
+ }
+ }
+
+ private void dropPartitionsNoTxn(String dbName, String tblName, List<String> partNames) {
+ ObjectPair<Query, Map<String, String>> queryWithParams =
+ getPartQueryWithParams(dbName, tblName, partNames);
+ Query query = queryWithParams.getFirst();
+ query.setClass(MPartition.class);
+ long deleted = query.deletePersistentAll(queryWithParams.getSecond());
+ LOG.debug("Deleted " + deleted + " partition from store");
+ query.closeAll();
+ }
+
+ /**
+ * Detaches column descriptors from storage descriptors; returns the set of unique CDs
+ * thus detached. This is done before dropping partitions because CDs are reused between
+ * SDs; so, we remove the links to delete SDs and then check the returned CDs to see if
+ * they are referenced by other SDs.
+ */
+ private HashSet<MColumnDescriptor> detachCdsFromSdsNoTxn(
+ String dbName, String tblName, List<String> partNames) {
+ ObjectPair<Query, Map<String, String>> queryWithParams =
+ getPartQueryWithParams(dbName, tblName, partNames);
+ Query query = queryWithParams.getFirst();
+ query.setClass(MPartition.class);
+ query.setResult("sd");
+ @SuppressWarnings("unchecked")
+ List<MStorageDescriptor> sds = (List<MStorageDescriptor>)query.executeWithMap(
+ queryWithParams.getSecond());
+ HashSet<MColumnDescriptor> candidateCds = new HashSet<MColumnDescriptor>();
+ for (MStorageDescriptor sd : sds) {
+ if (sd != null && sd.getCD() != null) {
+ candidateCds.add(sd.getCD());
+ sd.setCD(null);
+ }
+ }
+ return candidateCds;
+ }
+
+ private List<MPartition> getMPartitionsViaOrmFilter(String dbName,
+ String tblName, List<String> partNames, Out<Query> out) {
+ ObjectPair<Query, Map<String, String>> queryWithParams =
+ getPartQueryWithParams(dbName, tblName, partNames);
+ Query query = out.val = queryWithParams.getFirst();
+ query.setResultClass(MPartition.class);
+ query.setClass(MPartition.class);
+ query.setOrdering("partitionName ascending");
+
+ @SuppressWarnings("unchecked")
+ List<MPartition> result = (List<MPartition>)query.executeWithMap(queryWithParams.getSecond());
+ return result;
+ }
+
+ private ObjectPair<Query, Map<String, String>> getPartQueryWithParams(
+ String dbName, String tblName, List<String> partNames) {
StringBuilder sb = new StringBuilder(
"table.tableName == t1 && table.database.name == t2 && (");
int n = 0;
@@ -2008,24 +2104,15 @@ public class ObjectStore implements RawS
sb.setLength(sb.length() - 4); // remove the last " || "
sb.append(')');
- Query query = pm.newQuery(MPartition.class, sb.toString());
+ Query query = pm.newQuery();
+ query.setFilter(sb.toString());
LOG.debug(" JDOQL filter is " + sb.toString());
- params.put("t1", tblName.trim());
- params.put("t2", dbName.trim());
-
- String parameterDeclaration = makeParameterDeclarationString(params);
+ params.put("t1", tblName.trim().toLowerCase());
+ params.put("t2", dbName.trim().toLowerCase());
- query.declareParameters(parameterDeclaration);
- query.setOrdering("partitionName ascending");
-
- @SuppressWarnings("unchecked")
- List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
- // pm.retrieveAll(mparts); // retrieveAll is pessimistic. some fields may not be needed
- List<Partition> results = convertToParts(dbName, tblName, mparts);
- // pm.makeTransientAll(mparts); // makeTransient will prohibit future access of unfetched fields
- query.closeAll();
- return results;
+ query.declareParameters(makeParameterDeclarationString(params));
+ return new ObjectPair<Query, Map<String,String>>(query, params);
}
@Override
@@ -4182,7 +4269,7 @@ public class ObjectStore implements RawS
@SuppressWarnings("unchecked")
public List<MPartitionColumnPrivilege> listPartitionAllColumnGrants(String dbName,
- String tableName, String partName) {
+ String tableName, List<String> partNames) {
boolean success = false;
tableName = tableName.toLowerCase().trim();
dbName = dbName.toLowerCase().trim();
@@ -4191,12 +4278,9 @@ public class ObjectStore implements RawS
try {
openTransaction();
LOG.debug("Executing listPartitionAllColumnGrants");
- String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 && partition.partitionName == t3";
- Query query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr);
- query.declareParameters(
- "java.lang.String t1, java.lang.String t2, java.lang.String t3");
- mSecurityColList = (List<MPartitionColumnPrivilege>) query
- .executeWithArray(tableName, dbName, partName);
+ mSecurityColList = queryByPartitionNames(
+ dbName, tableName, partNames, MPartitionColumnPrivilege.class,
+ "partition.table.tableName", "partition.table.database.name", "partition.partitionName");
LOG.debug("Done executing query for listPartitionAllColumnGrants");
pm.retrieveAll(mSecurityColList);
success = commitTransaction();
@@ -4209,6 +4293,14 @@ public class ObjectStore implements RawS
return mSecurityColList;
}
+ public void dropPartitionAllColumnGrantsNoTxn(
+ String dbName, String tableName, List<String> partNames) {
+ ObjectPair<Query, Object[]> queryWithParams = makeQueryByPartitionNames(
+ dbName, tableName, partNames, MPartitionColumnPrivilege.class,
+ "partition.table.tableName", "partition.table.database.name", "partition.partitionName");
+ queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond());
+ }
+
@SuppressWarnings("unchecked")
private List<MDBPrivilege> listDatabaseGrants(String dbName) {
dbName = dbName.toLowerCase().trim();
@@ -4236,7 +4328,7 @@ public class ObjectStore implements RawS
@SuppressWarnings("unchecked")
private List<MPartitionPrivilege> listPartitionGrants(String dbName, String tableName,
- String partName) {
+ List<String> partNames) {
tableName = tableName.toLowerCase().trim();
dbName = dbName.toLowerCase().trim();
@@ -4245,12 +4337,9 @@ public class ObjectStore implements RawS
try {
openTransaction();
LOG.debug("Executing listPartitionGrants");
- Query query = pm.newQuery(MPartitionPrivilege.class,
- "partition.table.tableName == t1 && partition.table.database.name == t2 && partition.partitionName == t3");
- query.declareParameters(
- "java.lang.String t1, java.lang.String t2, java.lang.String t3");
- mSecurityTabPartList = (List<MPartitionPrivilege>) query
- .executeWithArray(tableName, dbName, partName);
+ mSecurityTabPartList = queryByPartitionNames(
+ dbName, tableName, partNames, MPartitionPrivilege.class, "partition.table.tableName",
+ "partition.table.database.name", "partition.partitionName");
LOG.debug("Done executing query for listPartitionGrants");
pm.retrieveAll(mSecurityTabPartList);
success = commitTransaction();
@@ -4263,6 +4352,42 @@ public class ObjectStore implements RawS
return mSecurityTabPartList;
}
+ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List<String> partNames) {
+ ObjectPair<Query, Object[]> queryWithParams = makeQueryByPartitionNames(
+ dbName, tableName, partNames, MPartitionPrivilege.class, "partition.table.tableName",
+ "partition.table.database.name", "partition.partitionName");
+ queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond());
+ }
+
+ @SuppressWarnings("unchecked")
+ private <T> List<T> queryByPartitionNames(String dbName, String tableName,
+ List<String> partNames, Class<T> clazz, String tbCol, String dbCol, String partCol) {
+ ObjectPair<Query, Object[]> queryAndParams = makeQueryByPartitionNames(
+ dbName, tableName, partNames, clazz, tbCol, dbCol, partCol);
+ return (List<T>)queryAndParams.getFirst().executeWithArray(queryAndParams.getSecond());
+ }
+
+ private ObjectPair<Query, Object[]> makeQueryByPartitionNames(
+ String dbName, String tableName, List<String> partNames, Class<?> clazz,
+ String tbCol, String dbCol, String partCol) {
+ String queryStr = tbCol + " == t1 && " + dbCol + " == t2";
+ String paramStr = "java.lang.String t1, java.lang.String t2";
+ Object[] params = new Object[2 + partNames.size()];
+ params[0] = tableName;
+ params[1] = dbName;
+ int index = 0;
+ for (String partName : partNames) {
+ params[index + 2] = partName;
+ queryStr += ((index == 0) ? " && (" : " || ") + partCol + " == p" + index;
+ paramStr += ", java.lang.String p" + index;
+ ++index;
+ }
+ queryStr += ")";
+ Query query = pm.newQuery(clazz, queryStr);
+ query.declareParameters(paramStr);
+ return new ObjectPair<Query, Object[]>(query, params);
+ }
+
@SuppressWarnings("unchecked")
public List<MTablePrivilege> listAllTableGrants(
String principalName, PrincipalType principalType, String dbName,
@@ -5616,8 +5741,16 @@ public class ObjectStore implements RawS
}
}
+ private void dropPartitionColumnStatisticsNoTxn(
+ String dbName, String tableName, List<String> partNames) throws MetaException {
+ ObjectPair<Query, Object[]> queryWithParams = makeQueryByPartitionNames(
+ dbName, tableName, partNames, MPartitionColumnStatistics.class,
+ "tableName", "dbName", "partition.partitionName");
+ queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond());
+ }
+
public boolean deletePartitionColumnStatistics(String dbName, String tableName,
- String partName, List<String> partVals,String colName)
+ String partName, List<String> partVals, String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
boolean ret = false;
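For reference, makeQueryByPartitionNames above emits one positional JDOQL parameter per partition name. This small sketch mirrors its filter-building loop (extracted for illustration; assumes a non-empty name list, as the callers guarantee):

    // filterByPartNames(tbCol, dbCol, "partition.partitionName", 2) yields:
    //   <tbCol> == t1 && <dbCol> == t2
    //     && (partition.partitionName == p0 || partition.partitionName == p1)
    static String filterByPartNames(String tbCol, String dbCol, String partCol, int n) {
      StringBuilder q = new StringBuilder(tbCol + " == t1 && " + dbCol + " == t2");
      for (int i = 0; i < n; ++i) {
        q.append(i == 0 ? " && (" : " || ").append(partCol).append(" == p").append(i);
      }
      return q.append(')').toString();
    }
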
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Wed Feb 12 21:32:34 2014
@@ -454,6 +454,9 @@ public interface RawStore extends Config
public abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
+ void dropPartitions(String dbName, String tblName, List<String> partNames)
+ throws MetaException, NoSuchObjectException;
+
List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
String principalName, PrincipalType principalType);
Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Wed Feb 12 21:32:34 2014
@@ -656,4 +656,10 @@ public class DummyRawStoreControlledComm
throws InvalidObjectException, MetaException {
return objectStore.addPartitions(dbName, tblName, parts);
}
+
+ @Override
+ public void dropPartitions(String dbName, String tblName, List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ objectStore.dropPartitions(dbName, tblName, partNames);
+ }
}
Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Wed Feb 12 21:32:34 2014
@@ -686,6 +686,10 @@ public class DummyRawStoreForJdoConnecti
throws InvalidObjectException, MetaException {
return false;
}
+
+ @Override
+ public void dropPartitions(String dbName, String tblName, List<String> partNames) {
+ }
}
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java Wed Feb 12 21:32:34 2014
@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -202,13 +203,8 @@ public final class ArchiveUtils {
* @return is it archived?
*/
public static boolean isArchived(Partition p) {
- Map<String, String> params = p.getParameters();
- if ("true".equalsIgnoreCase(params.get(
- org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.IS_ARCHIVED))) {
- return true;
- } else {
- return false;
- }
+ return MetaStoreUtils.isArchived(p.getTPartition());
+
}
/**
@@ -216,16 +212,10 @@ public final class ArchiveUtils {
* specification ARCHIVE was run for
*/
public static int getArchivingLevel(Partition p) throws HiveException {
- if(!isArchived(p)) {
- throw new HiveException("Getting level of unarchived partition");
- }
-
- Map<String, String> params = p.getParameters();
- String lv = params.get(ArchiveUtils.ARCHIVING_LEVEL);
- if(lv != null) {
- return Integer.parseInt(lv);
- } else { // partitions archived before introducing multiple archiving
- return p.getValues().size();
+ try {
+ return MetaStoreUtils.getArchivingLevel(p.getTPartition());
+ } catch (MetaException ex) {
+ throw new HiveException(ex.getMessage(), ex);
}
}
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Wed Feb 12 21:32:34 2014
@@ -294,7 +294,8 @@ public class DDLTask extends Task<DDLWor
DropTableDesc dropTbl = work.getDropTblDesc();
if (dropTbl != null) {
- return dropTable(db, dropTbl);
+ dropTableOrPartitions(db, dropTbl);
+ return 0;
}
AlterTableDesc alterTbl = work.getAlterTblDesc();
@@ -3522,20 +3523,17 @@ public class DDLTask extends Task<DDLWor
}
/**
- * Drop a given table.
+ * Drop a given table or some partitions. DropTableDesc is currently used for both.
*
* @param db
* The database in question.
* @param dropTbl
* This is the table we're dropping.
- * @return Returns 0 when execution succeeds and above 0 if it fails.
* @throws HiveException
* Throws this exception if an unexpected error occurs.
*/
- private int dropTable(Hive db, DropTableDesc dropTbl)
- throws HiveException {
- // We need to fetch the table before it is dropped so that it can be passed
- // to
+ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException {
+ // We need to fetch the table before it is dropped so that it can be passed to
// post-execution hook
Table tbl = null;
try {
@@ -3545,112 +3543,74 @@ public class DDLTask extends Task<DDLWor
}
if (dropTbl.getPartSpecs() == null) {
- // This is a true DROP TABLE
- if (tbl != null) {
- if (tbl.isView()) {
- if (!dropTbl.getExpectView()) {
- if (dropTbl.getIfExists()) {
- return 0;
- }
- throw new HiveException("Cannot drop a view with DROP TABLE");
+ dropTable(db, tbl, dropTbl);
+ } else {
+ dropPartitions(db, tbl, dropTbl);
+ }
+ }
+
+ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
+ // ifExists is currently verified in DDLSemanticAnalyzer
+ List<Partition> droppedParts = db.dropPartitions(dropTbl.getTableName(),
+ dropTbl.getPartSpecs(), true, dropTbl.getIgnoreProtection(), true);
+ for (Partition partition : droppedParts) {
+ console.printInfo("Dropped the partition " + partition.getName());
+ work.getOutputs().add(new WriteEntity(partition));
+ }
+ }
+
+ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
+ // This is a true DROP TABLE
+ if (tbl != null) {
+ if (tbl.isView()) {
+ if (!dropTbl.getExpectView()) {
+ if (dropTbl.getIfExists()) {
+ return;
}
- } else {
- if (dropTbl.getExpectView()) {
- if (dropTbl.getIfExists()) {
- return 0;
- }
- throw new HiveException(
- "Cannot drop a base table with DROP VIEW");
+ throw new HiveException("Cannot drop a view with DROP TABLE");
+ }
+ } else {
+ if (dropTbl.getExpectView()) {
+ if (dropTbl.getIfExists()) {
+ return;
}
+ throw new HiveException(
+ "Cannot drop a base table with DROP VIEW");
}
}
+ }
- if (tbl != null && !tbl.canDrop()) {
- throw new HiveException("Table " + tbl.getTableName() +
- " is protected from being dropped");
- }
-
- int partitionBatchSize = HiveConf.getIntVar(conf,
- ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
-
- // We should check that all the partitions of the table can be dropped
- if (tbl != null && tbl.isPartitioned()) {
- List<String> partitionNames = db.getPartitionNames(tbl.getTableName(), (short)-1);
+ if (tbl != null && !tbl.canDrop()) {
+ throw new HiveException("Table " + tbl.getTableName() +
+ " is protected from being dropped");
+ }
- for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) {
- List<String> partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize,
- partitionNames.size()));
- List<Partition> listPartitions = db.getPartitionsByNames(tbl, partNames);
- for (Partition p: listPartitions) {
- if (!p.canDrop()) {
- throw new HiveException("Table " + tbl.getTableName() +
- " Partition" + p.getName() +
- " is protected from being dropped");
- }
- }
- }
- }
+ int partitionBatchSize = HiveConf.getIntVar(conf,
+ ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
- // drop the table
- db.dropTable(dropTbl.getTableName());
- if (tbl != null) {
- work.getOutputs().add(new WriteEntity(tbl));
- }
- } else {
- // This is actually an ALTER TABLE DROP PARTITION
- List<Partition> partsToDelete = new ArrayList<Partition>();
- for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()) {
- List<Partition> partitions = new ArrayList<Partition>();
- boolean hasUnknown;
- try {
- hasUnknown = db.getPartitionsByExpr(tbl, partSpec.getPartSpec(), conf, partitions);
- } catch (TException e) {
- throw new HiveException(e);
- }
- if (hasUnknown) {
- throw new HiveException("Unexpected unknown partititions from "
- + partSpec.getPartSpec().getExprString());
- }
+ // We should check that all the partitions of the table can be dropped
+ if (tbl != null && tbl.isPartitioned()) {
+ List<String> partitionNames = db.getPartitionNames(tbl.getTableName(), (short)-1);
- // this is to prevent dropping archived partition which is archived in a
- // different level the drop command specified.
- int partPrefixToDrop = 0;
- for (FieldSchema fs : tbl.getPartCols()) {
- if (partSpec.getPartSpecKeys().contains(fs.getName())) {
- partPrefixToDrop += 1;
- } else {
- break;
- }
- }
- if (!dropTbl.getIgnoreProtection()) {
- for (Partition p : partitions) {
- if (!p.canDrop()) {
- throw new HiveException("Table " + tbl.getTableName()
- + " Partition " + p.getName()
- + " is protected from being dropped");
- } else if (ArchiveUtils.isArchived(p)) {
- int partAchiveLevel = ArchiveUtils.getArchivingLevel(p);
- // trying to drop partitions inside a har, disallow it.
- if (partAchiveLevel < partPrefixToDrop) {
- throw new HiveException(
- "Cannot drop a subset of partitions in an archive, partition "
- + p.getName());
- }
- }
+ for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) {
+ List<String> partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize,
+ partitionNames.size()));
+ List<Partition> listPartitions = db.getPartitionsByNames(tbl, partNames);
+ for (Partition p: listPartitions) {
+ if (!p.canDrop()) {
+ throw new HiveException("Table " + tbl.getTableName() +
+ " Partition" + p.getName() +
+ " is protected from being dropped");
}
}
- partsToDelete.addAll(partitions);
- }
-
- // drop all existing partitions from the list
- for (Partition partition : partsToDelete) {
- console.printInfo("Dropping the partition " + partition.getName());
- db.dropPartition(dropTbl.getTableName(), partition.getValues(), true);
- work.getOutputs().add(new WriteEntity(partition));
}
}
- return 0;
+ // drop the table
+ db.dropTable(dropTbl.getTableName());
+ if (tbl != null) {
+ work.getOutputs().add(new WriteEntity(tbl));
+ }
}
/**
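
The protection check above fetches partitions in bounded batches rather than all at once. A self-contained restatement of the subList pattern (the real batch size comes from METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX; names here are invented):

    import java.util.Arrays;
    import java.util.List;

    public class BatchedPartitionScan {
      static void processInBatches(List<String> partitionNames, int batchSize) {
        for (int i = 0; i < partitionNames.size(); i += batchSize) {
          List<String> batch = partitionNames.subList(
              i, Math.min(i + batchSize, partitionNames.size()));
          // The real code issues one metastore call per batch
          // (db.getPartitionsByNames) and then checks canDrop() per partition.
          System.out.println("batch: " + batch);
        }
      }

      public static void main(String[] args) {
        processInBatches(Arrays.asList("ds=1", "ds=2", "ds=3", "ds=4", "ds=5"), 2);
        // batch: [ds=1, ds=2]
        // batch: [ds=3, ds=4]
        // batch: [ds=5]
      }
    }
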
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Wed Feb 12 21:32:34 2014
@@ -49,6 +49,7 @@ import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
@@ -87,6 +88,7 @@ import org.apache.hadoop.hive.ql.exec.Ut
import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
+import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -1657,6 +1659,34 @@ private void constructOneLBLocationMap(F
}
}
+ public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs,
+ boolean deleteData, boolean ignoreProtection, boolean ifExists) throws HiveException {
+ Table t = newTable(tblName);
+ return dropPartitions(
+ t.getDbName(), t.getTableName(), partSpecs, deleteData, ignoreProtection, ifExists);
+ }
+
+ public List<Partition> dropPartitions(String dbName, String tblName,
+ List<DropTableDesc.PartSpec> partSpecs, boolean deleteData, boolean ignoreProtection,
+ boolean ifExists) throws HiveException {
+ try {
+ Table tbl = getTable(dbName, tblName);
+ List<ObjectPair<Integer, byte[]>> partExprs =
+ new ArrayList<ObjectPair<Integer,byte[]>>(partSpecs.size());
+ for (DropTableDesc.PartSpec partSpec : partSpecs) {
+ partExprs.add(new ObjectPair<Integer, byte[]>(partSpec.getPrefixLength(),
+ Utilities.serializeExpressionToKryo(partSpec.getPartSpec())));
+ }
+ List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().dropPartitions(
+ dbName, tblName, partExprs, deleteData, ignoreProtection, ifExists);
+ return convertFromMetastore(tbl, tParts, null);
+ } catch (NoSuchObjectException e) {
+ throw new HiveException("Partition or table doesn't exist.", e);
+ } catch (Exception e) {
+ throw new HiveException("Unknown error. Please check logs.", e);
+ }
+ }
+
public List<String> getPartitionNames(String tblName, short max) throws HiveException {
Table t = newTable(tblName);
return getPartitionNames(t.getDbName(), t.getTableName(), max);
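
A hypothetical caller sketch for the new dropPartitions() API (the table name and argument values are invented for illustration): one metastore round trip replaces the old per-partition dropPartition() loop.

    import java.util.List;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.plan.DropTableDesc;

    public class DropPartitionsCaller {
      // deleteData=true, ignoreProtection=false, ifExists=true, mirroring the
      // DDLTask.dropPartitions() call shown earlier in this commit.
      static void dropMatching(Hive db, String tableName,
          List<DropTableDesc.PartSpec> specs) throws HiveException {
        List<Partition> dropped = db.dropPartitions(tableName, specs, true, false, true);
        for (Partition p : dropped) {
          System.out.println("Dropped the partition " + p.getName());
        }
      }
    }
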
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Wed Feb 12 21:32:34 2014
@@ -566,18 +566,7 @@ public class Partition implements Serial
* @return protect mode
*/
public ProtectMode getProtectMode(){
- Map<String, String> parameters = tPartition.getParameters();
-
- if (parameters == null) {
- return null;
- }
-
- if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
- return new ProtectMode();
- } else {
- return ProtectMode.getProtectModeFromString(
- parameters.get(ProtectMode.PARAMETER_NAME));
- }
+ return MetaStoreUtils.getProtectMode(tPartition);
}
/**
@@ -597,9 +586,7 @@ public class Partition implements Serial
* that it is OK to drop the table
*/
public boolean canDrop() {
- ProtectMode mode = getProtectMode();
- ProtectMode parentMode = table.getProtectMode();
- return (!mode.noDrop && !mode.offline && !mode.readOnly && !parentMode.noDropCascade);
+ return MetaStoreUtils.canDropPartition(table.getTTable(), tPartition);
}
/**
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Wed Feb 12 21:32:34 2014
@@ -903,14 +903,7 @@ public class Table implements Serializab
* @return protect mode
*/
public ProtectMode getProtectMode(){
- Map<String, String> parameters = tTable.getParameters();
-
- if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
- return new ProtectMode();
- } else {
- return ProtectMode.getProtectModeFromString(
- parameters.get(ProtectMode.PARAMETER_NAME));
- }
+ return MetaStoreUtils.getProtectMode(tTable);
}
/**
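
Both getProtectMode() bodies and Partition.canDrop() now live in MetaStoreUtils. A minimal sketch of the combined drop check, mirroring the flag combination removed above (ProtectModeFlags is a stand-in for the real ProtectMode class):

    final class ProtectModeFlags {
      boolean noDrop, offline, readOnly, noDropCascade;
    }

    public class DropProtectionCheck {
      /** A partition may be dropped only if neither it nor its parent table forbids it. */
      static boolean canDropPartition(ProtectModeFlags partMode, ProtectModeFlags tableMode) {
        return !partMode.noDrop && !partMode.offline && !partMode.readOnly
            && !tableMode.noDropCascade;
      }
    }
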
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Wed Feb 12 21:32:34 2014
@@ -25,6 +25,7 @@ import java.io.Serializable;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -2416,29 +2417,31 @@ public class DDLSemanticAnalyzer extends
private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView)
throws SemanticException {
+ boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null)
+ || HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
+ // If the drop has to fail on non-existent partitions, we cannot batch expressions.
+ // That is because we actually have to check each separate expression for existence.
+ // We could do a small optimization for the case where expr has all columns and all
+ // operators are equality, if we assume those would always match one partition (which
+ // may not be true with legacy, non-normalized column values). That is probably a
+ // common case, but the assumption is somewhat hacky. Let's not do it for now.
+ boolean canGroupExprs = ifExists;
+
String tblName = getUnescapedName((ASTNode) ast.getChild(0));
- // get table metadata
Table tab = getTable(tblName, true);
- List<ExprNodeGenericFuncDesc> partSpecs = new ArrayList<ExprNodeGenericFuncDesc>();
- List<List<String>> names = new ArrayList<List<String>>();
- getFullPartitionSpecs(ast, tab, partSpecs, names);
+ Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
+ getFullPartitionSpecs(ast, tab, canGroupExprs);
+ if (partSpecs.isEmpty()) return; // nothing to do
+
validateAlterTableType(tab, AlterTableTypes.DROPPARTITION, expectView);
inputs.add(new ReadEntity(tab));
- boolean ignoreProtection = (ast.getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null);
- if (partSpecs != null) {
- boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
- // we want to signal an error if the partition doesn't exist and we're
- // configured not to fail silently
- boolean throwException =
- !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
- addTableDropPartsOutputs(tblName, partSpecs, throwException, ignoreProtection);
- }
- DropTableDesc dropTblDesc =
- new DropTableDesc(tblName, partSpecs, names, expectView, ignoreProtection);
+ boolean ignoreProtection = ast.getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null;
+ addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection);
- rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
- dropTblDesc), conf));
+ DropTableDesc dropTblDesc =
+ new DropTableDesc(tblName, partSpecs, expectView, ignoreProtection);
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
}
private void analyzeAlterTablePartColType(ASTNode ast)
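
To make the grouping rule above concrete: for ALTER TABLE t DROP IF EXISTS PARTITION (ds='1'), PARTITION (ds='2'), both specs have prefix length 1, so the two filters are OR-ed into one expression and the metastore sees a single drop request; without IF EXISTS they remain separate entries so each can fail individually on a non-existent partition. A minimal sketch, with String standing in for ExprNodeGenericFuncDesc:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ExprGroupingSketch {
      static void addExpr(Map<Integer, List<String>> result, int prefixLength,
          String expr, boolean canGroupExprs) {
        List<String> orExpr = result.get(prefixLength);
        if (orExpr == null) {
          List<String> exprs = new ArrayList<String>();
          exprs.add(expr);
          result.put(prefixLength, exprs);
        } else if (canGroupExprs) {
          orExpr.set(0, "(" + expr + ") or (" + orExpr.get(0) + ")");
        } else {
          orExpr.add(expr);
        }
      }

      public static void main(String[] args) {
        Map<Integer, List<String>> grouped = new HashMap<Integer, List<String>>();
        addExpr(grouped, 1, "ds = '1'", true);   // canGroupExprs == ifExists
        addExpr(grouped, 1, "ds = '2'", true);
        System.out.println(grouped); // {1=[(ds = '2') or (ds = '1')]}
      }
    }
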
@@ -2747,22 +2750,25 @@ public class DDLSemanticAnalyzer extends
* Get the partition specs from the tree. This stores the full specification
* with the comparator operator into the output list.
*
- * @param ast
- * Tree to extract partitions from.
- * @throws SemanticException
+ * @param ast Tree to extract partitions from.
+ * @param tab Table.
+ * @param canGroupExprs Whether expressions with the same prefix length may be OR-ed together.
+ * @return Map of partition filter expressions, keyed by prefix length. Most of the time the
+ * prefix length will be the same for all partition specs, so we can just OR the expressions.
*/
- private void getFullPartitionSpecs(CommonTree ast, Table tab,
- List<ExprNodeGenericFuncDesc> exprs, List<List<String>> cols) throws SemanticException {
+ private Map<Integer, List<ExprNodeGenericFuncDesc>> getFullPartitionSpecs(
+ CommonTree ast, Table tab, boolean canGroupExprs) throws SemanticException {
Map<String, String> colTypes = new HashMap<String, String>();
for (FieldSchema fs : tab.getPartitionKeys()) {
colTypes.put(fs.getName().toLowerCase(), fs.getType());
}
+ Map<Integer, List<ExprNodeGenericFuncDesc>> result =
+ new HashMap<Integer, List<ExprNodeGenericFuncDesc>>();
for (int childIndex = 1; childIndex < ast.getChildCount(); childIndex++) {
Tree partSpecTree = ast.getChild(childIndex);
if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) continue;
ExprNodeGenericFuncDesc expr = null;
- List<String> names = new ArrayList<String>(partSpecTree.getChildCount());
+ HashSet<String> names = new HashSet<String>(partSpecTree.getChildCount());
for (int i = 0; i < partSpecTree.getChildCount(); ++i) {
CommonTree partSpecSingleKey = (CommonTree) partSpecTree.getChild(i);
assert (partSpecSingleKey.getType() == HiveParser.TOK_PARTVAL);
@@ -2774,23 +2780,54 @@ public class DDLSemanticAnalyzer extends
if (type == null) {
throw new SemanticException("Column " + key + " not found");
}
+ // Create the corresponding hive expression to filter on partition columns.
ExprNodeColumnDesc column = new ExprNodeColumnDesc(
TypeInfoFactory.getPrimitiveTypeInfo(type), key, null, true);
- ExprNodeGenericFuncDesc op = new ExprNodeGenericFuncDesc(
- TypeInfoFactory.booleanTypeInfo,
- FunctionRegistry.getFunctionInfo(operator).getGenericUDF(),
- Lists.newArrayList(column, new ExprNodeConstantDesc(val)));
- expr = (expr == null) ? op : new ExprNodeGenericFuncDesc(
- TypeInfoFactory.booleanTypeInfo,
- FunctionRegistry.getGenericUDFForAnd(),
- Lists.<ExprNodeDesc>newArrayList(expr, op));
+ ExprNodeGenericFuncDesc op = makeBinaryPredicate(
+ operator, column, new ExprNodeConstantDesc(val));
+ // If it's a multi-expr filter (e.g. a='5', b='2012-01-02'), AND it with the previous exprs.
+ expr = (expr == null) ? op : makeBinaryPredicate("and", expr, op);
names.add(key);
}
- if (expr != null) {
- exprs.add(expr);
- cols.add(names);
+ if (expr == null) continue;
+ // We got the expr for one full partition spec. Determine the prefix length.
+ int prefixLength = calculatePartPrefix(tab, names);
+ List<ExprNodeGenericFuncDesc> orExpr = result.get(prefixLength);
+ // We have to tell apart partitions resulting from specs with different prefix lengths.
+ // So, if we already have an expression for the same prefix length, we can OR the two.
+ // If we don't, create a new separate filter. In most cases there will only be one.
+ if (orExpr == null) {
+ result.put(prefixLength, Lists.newArrayList(expr));
+ } else if (canGroupExprs) {
+ orExpr.set(0, makeBinaryPredicate("or", expr, orExpr.get(0)));
+ } else {
+ orExpr.add(expr);
}
}
+ return result;
+ }
+
+ private static ExprNodeGenericFuncDesc makeBinaryPredicate(
+ String fn, ExprNodeDesc left, ExprNodeDesc right) {
+ return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+ FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(left, right));
+ }
+
+ /**
+ * Calculates the partition prefix length based on the drop spec.
+ * This is used to avoid deleting partitions that were archived at a lower level.
+ * For example, with key columns A and B and a drop spec of A=5, B=6, we shouldn't
+ * drop an archive made at A=5/, because it can contain values of B other than 6.
+ * @param tbl Table
+ * @param partSpecKeys Keys present in drop partition spec.
+ */
+ private int calculatePartPrefix(Table tbl, HashSet<String> partSpecKeys) {
+ int partPrefixToDrop = 0;
+ for (FieldSchema fs : tbl.getPartCols()) {
+ if (!partSpecKeys.contains(fs.getName())) break;
+ ++partPrefixToDrop;
+ }
+ return partPrefixToDrop;
}
/**
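
A self-contained restatement of the prefix computation for a table partitioned by (ds, hr, min): count the leading partition columns that appear in the drop spec, stopping at the first gap.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;

    public class PartPrefixSketch {
      static int calculatePartPrefix(List<String> partCols, HashSet<String> specKeys) {
        int partPrefixToDrop = 0;
        for (String col : partCols) {
          if (!specKeys.contains(col)) break;
          ++partPrefixToDrop;
        }
        return partPrefixToDrop;
      }

      public static void main(String[] args) {
        List<String> cols = Arrays.asList("ds", "hr", "min");
        // Spec covers {ds, hr}: prefix 2. An archive made at level 1 (ds only)
        // spans other hr values, so dropping at prefix 2 must be disallowed.
        System.out.println(calculatePartPrefix(cols,
            new HashSet<String>(Arrays.asList("ds", "hr"))));  // prints 2
        // Spec covers only {hr}: 'ds' is missing, so no leading prefix is covered.
        System.out.println(calculatePartPrefix(cols,
            new HashSet<String>(Arrays.asList("hr"))));        // prints 0
      }
    }
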
@@ -2881,39 +2918,42 @@ public class DDLSemanticAnalyzer extends
* pre-execution hook. If the partition does not exist, throw an error if
* throwIfNonExistent is true, otherwise ignore it.
*/
- private void addTableDropPartsOutputs(String tblName, List<ExprNodeGenericFuncDesc> partSpecs,
- boolean throwIfNonExistent, boolean ignoreProtection)
- throws SemanticException {
- Table tab = getTable(tblName);
-
- Iterator<ExprNodeGenericFuncDesc> i = partSpecs.iterator();
- while (i.hasNext()) {
- ExprNodeGenericFuncDesc partSpec = i.next();
- List<Partition> parts = new ArrayList<Partition>();
- boolean hasUnknown = false;
- try {
- hasUnknown = db.getPartitionsByExpr(tab, partSpec, conf, parts);
- } catch (Exception e) {
- throw new SemanticException(
- ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()), e);
- }
- if (hasUnknown) {
- throw new SemanticException(
- "Unexpected unknown partitions for " + partSpec.getExprString());
- }
-
- if (parts.isEmpty()) {
- if (throwIfNonExistent) {
+ private void addTableDropPartsOutputs(Table tab,
+ Collection<List<ExprNodeGenericFuncDesc>> partSpecs, boolean throwIfNonExistent,
+ boolean ignoreProtection) throws SemanticException {
+
+ for (List<ExprNodeGenericFuncDesc> specs : partSpecs) {
+ for (ExprNodeGenericFuncDesc partSpec : specs) {
+ List<Partition> parts = new ArrayList<Partition>();
+ boolean hasUnknown = false;
+ try {
+ hasUnknown = db.getPartitionsByExpr(tab, partSpec, conf, parts);
+ } catch (Exception e) {
throw new SemanticException(
- ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()));
+ ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()), e);
}
- }
- for (Partition p : parts) {
- if (!ignoreProtection && !p.canDrop()) {
+ if (hasUnknown) {
throw new SemanticException(
- ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName()));
+ "Unexpected unknown partitions for " + partSpec.getExprString());
+ }
+
+ // TODO: the ifExists check could be moved to the metastore; in fact, it already
+ // supports it. Keep the check here for now: we fetch the partitions for the
+ // outputs anyway, so we can surface the error message earlier. If we get rid
+ // of the outputs, we can get rid of this.
+ if (parts.isEmpty()) {
+ if (throwIfNonExistent) {
+ throw new SemanticException(
+ ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()));
+ }
+ }
+ for (Partition p : parts) {
+ // TODO: likewise, the metastore already checks protection, but check here too.
+ if (!ignoreProtection && !p.canDrop()) {
+ throw new SemanticException(
+ ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName()));
+ }
+ outputs.add(new WriteEntity(p));
}
- outputs.add(new WriteEntity(p));
}
}
}
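
An illustrative use of the makeBinaryPredicate() helper added above: building the filter (ds = '2008-04-08') and (hr = '11') the way analyzeAlterTableDropParts does for a two-key PARTITION spec. The helper body is copied here because the real one is private to DDLSemanticAnalyzer; the column names and values are invented.

    import com.google.common.collect.Lists;
    import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
    import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class PartitionFilterSketch {
      // Local copy of the private helper shown in the diff above.
      static ExprNodeGenericFuncDesc makeBinaryPredicate(
          String fn, ExprNodeDesc left, ExprNodeDesc right) {
        return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
            FunctionRegistry.getFunctionInfo(fn).getGenericUDF(),
            Lists.newArrayList(left, right));
      }

      static ExprNodeGenericFuncDesc example() {
        ExprNodeColumnDesc ds = new ExprNodeColumnDesc(
            TypeInfoFactory.stringTypeInfo, "ds", null, true);
        ExprNodeColumnDesc hr = new ExprNodeColumnDesc(
            TypeInfoFactory.stringTypeInfo, "hr", null, true);
        ExprNodeGenericFuncDesc dsEq =
            makeBinaryPredicate("=", ds, new ExprNodeConstantDesc("2008-04-08"));
        ExprNodeGenericFuncDesc hrEq =
            makeBinaryPredicate("=", hr, new ExprNodeConstantDesc("11"));
        // AND the per-key predicates, as the multi-expr filter comment describes.
        return makeBinaryPredicate("and", dsEq, hrEq);
      }
    }
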
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java?rev=1567761&r1=1567760&r2=1567761&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java Wed Feb 12 21:32:34 2014
@@ -21,30 +21,31 @@ package org.apache.hadoop.hive.ql.plan;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
/**
* DropTableDesc.
- *
+ * TODO: this is currently used for both drop table and drop partitions.
*/
@Explain(displayName = "Drop Table")
public class DropTableDesc extends DDLDesc implements Serializable {
private static final long serialVersionUID = 1L;
public static class PartSpec {
- public PartSpec(ExprNodeGenericFuncDesc partSpec, ArrayList<String> partSpecKeys) {
+ public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) {
this.partSpec = partSpec;
- this.partSpecKeys = partSpecKeys;
+ this.prefixLength = prefixLength;
}
public ExprNodeGenericFuncDesc getPartSpec() {
return partSpec;
}
- public ArrayList<String> getPartSpecKeys() {
- return partSpecKeys;
+ public int getPrefixLength() {
+ return prefixLength;
}
private static final long serialVersionUID = 1L;
private ExprNodeGenericFuncDesc partSpec;
// TODO: see if we can get rid of this... used in one place to distinguish archived parts
- private ArrayList<String> partSpecKeys;
+ private int prefixLength;
}
String tableName;
@@ -67,14 +68,15 @@ public class DropTableDesc extends DDLDe
this.ignoreProtection = false;
}
- public DropTableDesc(String tableName, List<ExprNodeGenericFuncDesc> partSpecs,
- List<List<String>> partSpecKeys, boolean expectView, boolean ignoreProtection) {
+ public DropTableDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs,
+ boolean expectView, boolean ignoreProtection) {
this.tableName = tableName;
- assert partSpecs.size() == partSpecKeys.size();
this.partSpecs = new ArrayList<PartSpec>(partSpecs.size());
- for (int i = 0; i < partSpecs.size(); ++i) {
- this.partSpecs.add(new PartSpec(
- partSpecs.get(i), new ArrayList<String>(partSpecKeys.get(i))));
+ for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) {
+ int prefixLength = partSpec.getKey();
+ for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) {
+ this.partSpecs.add(new PartSpec(expr, prefixLength));
+ }
}
this.ignoreProtection = ignoreProtection;
this.expectView = expectView;
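
A minimal sketch of the flattening the new constructor performs, with String standing in for ExprNodeGenericFuncDesc: every (prefixLength, expression) pair in the analyzer's map becomes one PartSpec.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PartSpecFlattenSketch {
      public static void main(String[] args) {
        Map<Integer, List<String>> partSpecs = new HashMap<Integer, List<String>>();
        partSpecs.put(1, Arrays.asList("(ds = '1') or (ds = '2')"));
        partSpecs.put(2, Arrays.asList("(ds = '3') and (hr = '4')"));

        List<String> flattened = new ArrayList<String>();
        for (Map.Entry<Integer, List<String>> e : partSpecs.entrySet()) {
          for (String expr : e.getValue()) {
            flattened.add("prefixLength=" + e.getKey() + " expr=" + expr);
          }
        }
        // HashMap iteration order is unspecified; two entries either way.
        System.out.println(flattened);
      }
    }
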