You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by na...@apache.org on 2013/04/25 14:24:29 UTC
svn commit: r1475722 [6/7] - in /hive/trunk: ./ metastore/if/
metastore/src/gen/thrift/gen-cpp/
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/
metastore/src/gen/thrift/gen-php/metastore/
metastore/src/gen/thrift/gen-py/hive...
Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py Thu Apr 25 12:24:28 2013
@@ -311,6 +311,17 @@ class Iface(fb303.FacebookService.Iface)
"""
pass
+ def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+ """
+ Parameters:
+ - partitionSpecs
+ - source_db
+ - source_table_name
+ - dest_db
+ - dest_table_name
+ """
+ pass
+
def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
"""
Parameters:
@@ -2011,6 +2022,52 @@ class Client(fb303.FacebookService.Clien
raise result.o2
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition failed: unknown result");
+ def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+ """
+ Parameters:
+ - partitionSpecs
+ - source_db
+ - source_table_name
+ - dest_db
+ - dest_table_name
+ """
+ self.send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ return self.recv_exchange_partition()
+
+ def send_exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+ self._oprot.writeMessageBegin('exchange_partition', TMessageType.CALL, self._seqid)
+ args = exchange_partition_args()
+ args.partitionSpecs = partitionSpecs
+ args.source_db = source_db
+ args.source_table_name = source_table_name
+ args.dest_db = dest_db
+ args.dest_table_name = dest_table_name
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_exchange_partition(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = exchange_partition_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ if result.o4 is not None:
+ raise result.o4
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result");
+
def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
"""
Parameters:
@@ -3767,6 +3824,7 @@ class Processor(fb303.FacebookService.Pr
self._processMap["drop_partition_by_name"] = Processor.process_drop_partition_by_name
self._processMap["drop_partition_by_name_with_environment_context"] = Processor.process_drop_partition_by_name_with_environment_context
self._processMap["get_partition"] = Processor.process_get_partition
+ self._processMap["exchange_partition"] = Processor.process_exchange_partition
self._processMap["get_partition_with_auth"] = Processor.process_get_partition_with_auth
self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name
self._processMap["get_partitions"] = Processor.process_get_partitions
@@ -4415,6 +4473,26 @@ class Processor(fb303.FacebookService.Pr
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_exchange_partition(self, seqid, iprot, oprot):
+ args = exchange_partition_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = exchange_partition_result()
+ try:
+ result.success = self._handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name)
+ except MetaException as o1:
+ result.o1 = o1
+ except NoSuchObjectException as o2:
+ result.o2 = o2
+ except InvalidObjectException as o3:
+ result.o3 = o3
+ except InvalidInputException as o4:
+ result.o4 = o4
+ oprot.writeMessageBegin("exchange_partition", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_get_partition_with_auth(self, seqid, iprot, oprot):
args = get_partition_with_auth_args()
args.read(iprot)
@@ -11021,6 +11099,236 @@ class get_partition_result:
def __ne__(self, other):
return not (self == other)
+class exchange_partition_args:
+ """
+ Attributes:
+ - partitionSpecs
+ - source_db
+ - source_table_name
+ - dest_db
+ - dest_table_name
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.MAP, 'partitionSpecs', (TType.STRING,None,TType.STRING,None), None, ), # 1
+ (2, TType.STRING, 'source_db', None, None, ), # 2
+ (3, TType.STRING, 'source_table_name', None, None, ), # 3
+ (4, TType.STRING, 'dest_db', None, None, ), # 4
+ (5, TType.STRING, 'dest_table_name', None, None, ), # 5
+ )
+
+ def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None,):
+ self.partitionSpecs = partitionSpecs
+ self.source_db = source_db
+ self.source_table_name = source_table_name
+ self.dest_db = dest_db
+ self.dest_table_name = dest_table_name
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.MAP:
+ self.partitionSpecs = {}
+ (_ktype342, _vtype343, _size341 ) = iprot.readMapBegin()
+ for _i345 in xrange(_size341):
+ _key346 = iprot.readString();
+ _val347 = iprot.readString();
+ self.partitionSpecs[_key346] = _val347
+ iprot.readMapEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.source_db = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.source_table_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.dest_db = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.STRING:
+ self.dest_table_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('exchange_partition_args')
+ if self.partitionSpecs is not None:
+ oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
+ oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
+ for kiter348,viter349 in self.partitionSpecs.items():
+ oprot.writeString(kiter348)
+ oprot.writeString(viter349)
+ oprot.writeMapEnd()
+ oprot.writeFieldEnd()
+ if self.source_db is not None:
+ oprot.writeFieldBegin('source_db', TType.STRING, 2)
+ oprot.writeString(self.source_db)
+ oprot.writeFieldEnd()
+ if self.source_table_name is not None:
+ oprot.writeFieldBegin('source_table_name', TType.STRING, 3)
+ oprot.writeString(self.source_table_name)
+ oprot.writeFieldEnd()
+ if self.dest_db is not None:
+ oprot.writeFieldBegin('dest_db', TType.STRING, 4)
+ oprot.writeString(self.dest_db)
+ oprot.writeFieldEnd()
+ if self.dest_table_name is not None:
+ oprot.writeFieldBegin('dest_table_name', TType.STRING, 5)
+ oprot.writeString(self.dest_table_name)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class exchange_partition_result:
+ """
+ Attributes:
+ - success
+ - o1
+ - o2
+ - o3
+ - o4
+ """
+
+ thrift_spec = (
+ (0, TType.STRUCT, 'success', (Partition, Partition.thrift_spec), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'o3', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3
+ (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4
+ )
+
+ def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,):
+ self.success = success
+ self.o1 = o1
+ self.o2 = o2
+ self.o3 = o3
+ self.o4 = o4
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = Partition()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = NoSuchObjectException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.o3 = InvalidObjectException()
+ self.o3.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.o4 = InvalidInputException()
+ self.o4.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('exchange_partition_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 is not None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o3 is not None:
+ oprot.writeFieldBegin('o3', TType.STRUCT, 3)
+ self.o3.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o4 is not None:
+ oprot.writeFieldBegin('o4', TType.STRUCT, 4)
+ self.o4.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class get_partition_with_auth_args:
"""
Attributes:
@@ -11069,10 +11377,10 @@ class get_partition_with_auth_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype344, _size341) = iprot.readListBegin()
- for _i345 in xrange(_size341):
- _elem346 = iprot.readString();
- self.part_vals.append(_elem346)
+ (_etype353, _size350) = iprot.readListBegin()
+ for _i354 in xrange(_size350):
+ _elem355 = iprot.readString();
+ self.part_vals.append(_elem355)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11084,10 +11392,10 @@ class get_partition_with_auth_args:
elif fid == 5:
if ftype == TType.LIST:
self.group_names = []
- (_etype350, _size347) = iprot.readListBegin()
- for _i351 in xrange(_size347):
- _elem352 = iprot.readString();
- self.group_names.append(_elem352)
+ (_etype359, _size356) = iprot.readListBegin()
+ for _i360 in xrange(_size356):
+ _elem361 = iprot.readString();
+ self.group_names.append(_elem361)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11112,8 +11420,8 @@ class get_partition_with_auth_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter353 in self.part_vals:
- oprot.writeString(iter353)
+ for iter362 in self.part_vals:
+ oprot.writeString(iter362)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.user_name is not None:
@@ -11123,8 +11431,8 @@ class get_partition_with_auth_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter354 in self.group_names:
- oprot.writeString(iter354)
+ for iter363 in self.group_names:
+ oprot.writeString(iter363)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -11516,11 +11824,11 @@ class get_partitions_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype358, _size355) = iprot.readListBegin()
- for _i359 in xrange(_size355):
- _elem360 = Partition()
- _elem360.read(iprot)
- self.success.append(_elem360)
+ (_etype367, _size364) = iprot.readListBegin()
+ for _i368 in xrange(_size364):
+ _elem369 = Partition()
+ _elem369.read(iprot)
+ self.success.append(_elem369)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11549,8 +11857,8 @@ class get_partitions_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter361 in self.success:
- iter361.write(oprot)
+ for iter370 in self.success:
+ iter370.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -11637,10 +11945,10 @@ class get_partitions_with_auth_args:
elif fid == 5:
if ftype == TType.LIST:
self.group_names = []
- (_etype365, _size362) = iprot.readListBegin()
- for _i366 in xrange(_size362):
- _elem367 = iprot.readString();
- self.group_names.append(_elem367)
+ (_etype374, _size371) = iprot.readListBegin()
+ for _i375 in xrange(_size371):
+ _elem376 = iprot.readString();
+ self.group_names.append(_elem376)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11673,8 +11981,8 @@ class get_partitions_with_auth_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter368 in self.group_names:
- oprot.writeString(iter368)
+ for iter377 in self.group_names:
+ oprot.writeString(iter377)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -11726,11 +12034,11 @@ class get_partitions_with_auth_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype372, _size369) = iprot.readListBegin()
- for _i373 in xrange(_size369):
- _elem374 = Partition()
- _elem374.read(iprot)
- self.success.append(_elem374)
+ (_etype381, _size378) = iprot.readListBegin()
+ for _i382 in xrange(_size378):
+ _elem383 = Partition()
+ _elem383.read(iprot)
+ self.success.append(_elem383)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11759,8 +12067,8 @@ class get_partitions_with_auth_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter375 in self.success:
- iter375.write(oprot)
+ for iter384 in self.success:
+ iter384.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -11901,10 +12209,10 @@ class get_partition_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype379, _size376) = iprot.readListBegin()
- for _i380 in xrange(_size376):
- _elem381 = iprot.readString();
- self.success.append(_elem381)
+ (_etype388, _size385) = iprot.readListBegin()
+ for _i389 in xrange(_size385):
+ _elem390 = iprot.readString();
+ self.success.append(_elem390)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -11927,8 +12235,8 @@ class get_partition_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter382 in self.success:
- oprot.writeString(iter382)
+ for iter391 in self.success:
+ oprot.writeString(iter391)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o2 is not None:
@@ -11998,10 +12306,10 @@ class get_partitions_ps_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype386, _size383) = iprot.readListBegin()
- for _i387 in xrange(_size383):
- _elem388 = iprot.readString();
- self.part_vals.append(_elem388)
+ (_etype395, _size392) = iprot.readListBegin()
+ for _i396 in xrange(_size392):
+ _elem397 = iprot.readString();
+ self.part_vals.append(_elem397)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12031,8 +12339,8 @@ class get_partitions_ps_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter389 in self.part_vals:
- oprot.writeString(iter389)
+ for iter398 in self.part_vals:
+ oprot.writeString(iter398)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.max_parts is not None:
@@ -12088,11 +12396,11 @@ class get_partitions_ps_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype393, _size390) = iprot.readListBegin()
- for _i394 in xrange(_size390):
- _elem395 = Partition()
- _elem395.read(iprot)
- self.success.append(_elem395)
+ (_etype402, _size399) = iprot.readListBegin()
+ for _i403 in xrange(_size399):
+ _elem404 = Partition()
+ _elem404.read(iprot)
+ self.success.append(_elem404)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12121,8 +12429,8 @@ class get_partitions_ps_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter396 in self.success:
- iter396.write(oprot)
+ for iter405 in self.success:
+ iter405.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -12202,10 +12510,10 @@ class get_partitions_ps_with_auth_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype400, _size397) = iprot.readListBegin()
- for _i401 in xrange(_size397):
- _elem402 = iprot.readString();
- self.part_vals.append(_elem402)
+ (_etype409, _size406) = iprot.readListBegin()
+ for _i410 in xrange(_size406):
+ _elem411 = iprot.readString();
+ self.part_vals.append(_elem411)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12222,10 +12530,10 @@ class get_partitions_ps_with_auth_args:
elif fid == 6:
if ftype == TType.LIST:
self.group_names = []
- (_etype406, _size403) = iprot.readListBegin()
- for _i407 in xrange(_size403):
- _elem408 = iprot.readString();
- self.group_names.append(_elem408)
+ (_etype415, _size412) = iprot.readListBegin()
+ for _i416 in xrange(_size412):
+ _elem417 = iprot.readString();
+ self.group_names.append(_elem417)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12250,8 +12558,8 @@ class get_partitions_ps_with_auth_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter409 in self.part_vals:
- oprot.writeString(iter409)
+ for iter418 in self.part_vals:
+ oprot.writeString(iter418)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.max_parts is not None:
@@ -12265,8 +12573,8 @@ class get_partitions_ps_with_auth_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 6)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter410 in self.group_names:
- oprot.writeString(iter410)
+ for iter419 in self.group_names:
+ oprot.writeString(iter419)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -12318,11 +12626,11 @@ class get_partitions_ps_with_auth_result
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype414, _size411) = iprot.readListBegin()
- for _i415 in xrange(_size411):
- _elem416 = Partition()
- _elem416.read(iprot)
- self.success.append(_elem416)
+ (_etype423, _size420) = iprot.readListBegin()
+ for _i424 in xrange(_size420):
+ _elem425 = Partition()
+ _elem425.read(iprot)
+ self.success.append(_elem425)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12351,8 +12659,8 @@ class get_partitions_ps_with_auth_result
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter417 in self.success:
- iter417.write(oprot)
+ for iter426 in self.success:
+ iter426.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -12426,10 +12734,10 @@ class get_partition_names_ps_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype421, _size418) = iprot.readListBegin()
- for _i422 in xrange(_size418):
- _elem423 = iprot.readString();
- self.part_vals.append(_elem423)
+ (_etype430, _size427) = iprot.readListBegin()
+ for _i431 in xrange(_size427):
+ _elem432 = iprot.readString();
+ self.part_vals.append(_elem432)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12459,8 +12767,8 @@ class get_partition_names_ps_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter424 in self.part_vals:
- oprot.writeString(iter424)
+ for iter433 in self.part_vals:
+ oprot.writeString(iter433)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.max_parts is not None:
@@ -12516,10 +12824,10 @@ class get_partition_names_ps_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype428, _size425) = iprot.readListBegin()
- for _i429 in xrange(_size425):
- _elem430 = iprot.readString();
- self.success.append(_elem430)
+ (_etype437, _size434) = iprot.readListBegin()
+ for _i438 in xrange(_size434):
+ _elem439 = iprot.readString();
+ self.success.append(_elem439)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12548,8 +12856,8 @@ class get_partition_names_ps_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter431 in self.success:
- oprot.writeString(iter431)
+ for iter440 in self.success:
+ oprot.writeString(iter440)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -12705,11 +13013,11 @@ class get_partitions_by_filter_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype435, _size432) = iprot.readListBegin()
- for _i436 in xrange(_size432):
- _elem437 = Partition()
- _elem437.read(iprot)
- self.success.append(_elem437)
+ (_etype444, _size441) = iprot.readListBegin()
+ for _i445 in xrange(_size441):
+ _elem446 = Partition()
+ _elem446.read(iprot)
+ self.success.append(_elem446)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12738,8 +13046,8 @@ class get_partitions_by_filter_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter438 in self.success:
- iter438.write(oprot)
+ for iter447 in self.success:
+ iter447.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -12810,10 +13118,10 @@ class get_partitions_by_names_args:
elif fid == 3:
if ftype == TType.LIST:
self.names = []
- (_etype442, _size439) = iprot.readListBegin()
- for _i443 in xrange(_size439):
- _elem444 = iprot.readString();
- self.names.append(_elem444)
+ (_etype451, _size448) = iprot.readListBegin()
+ for _i452 in xrange(_size448):
+ _elem453 = iprot.readString();
+ self.names.append(_elem453)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12838,8 +13146,8 @@ class get_partitions_by_names_args:
if self.names is not None:
oprot.writeFieldBegin('names', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.names))
- for iter445 in self.names:
- oprot.writeString(iter445)
+ for iter454 in self.names:
+ oprot.writeString(iter454)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -12891,11 +13199,11 @@ class get_partitions_by_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype449, _size446) = iprot.readListBegin()
- for _i450 in xrange(_size446):
- _elem451 = Partition()
- _elem451.read(iprot)
- self.success.append(_elem451)
+ (_etype458, _size455) = iprot.readListBegin()
+ for _i459 in xrange(_size455):
+ _elem460 = Partition()
+ _elem460.read(iprot)
+ self.success.append(_elem460)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -12924,8 +13232,8 @@ class get_partitions_by_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter452 in self.success:
- iter452.write(oprot)
+ for iter461 in self.success:
+ iter461.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -13155,11 +13463,11 @@ class alter_partitions_args:
elif fid == 3:
if ftype == TType.LIST:
self.new_parts = []
- (_etype456, _size453) = iprot.readListBegin()
- for _i457 in xrange(_size453):
- _elem458 = Partition()
- _elem458.read(iprot)
- self.new_parts.append(_elem458)
+ (_etype465, _size462) = iprot.readListBegin()
+ for _i466 in xrange(_size462):
+ _elem467 = Partition()
+ _elem467.read(iprot)
+ self.new_parts.append(_elem467)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -13184,8 +13492,8 @@ class alter_partitions_args:
if self.new_parts is not None:
oprot.writeFieldBegin('new_parts', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
- for iter459 in self.new_parts:
- iter459.write(oprot)
+ for iter468 in self.new_parts:
+ iter468.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -13497,10 +13805,10 @@ class rename_partition_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype463, _size460) = iprot.readListBegin()
- for _i464 in xrange(_size460):
- _elem465 = iprot.readString();
- self.part_vals.append(_elem465)
+ (_etype472, _size469) = iprot.readListBegin()
+ for _i473 in xrange(_size469):
+ _elem474 = iprot.readString();
+ self.part_vals.append(_elem474)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -13531,8 +13839,8 @@ class rename_partition_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter466 in self.part_vals:
- oprot.writeString(iter466)
+ for iter475 in self.part_vals:
+ oprot.writeString(iter475)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.new_part is not None:
@@ -13660,10 +13968,10 @@ class partition_name_has_valid_character
if fid == 1:
if ftype == TType.LIST:
self.part_vals = []
- (_etype470, _size467) = iprot.readListBegin()
- for _i471 in xrange(_size467):
- _elem472 = iprot.readString();
- self.part_vals.append(_elem472)
+ (_etype479, _size476) = iprot.readListBegin()
+ for _i480 in xrange(_size476):
+ _elem481 = iprot.readString();
+ self.part_vals.append(_elem481)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -13685,8 +13993,8 @@ class partition_name_has_valid_character
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter473 in self.part_vals:
- oprot.writeString(iter473)
+ for iter482 in self.part_vals:
+ oprot.writeString(iter482)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.throw_exception is not None:
@@ -14015,10 +14323,10 @@ class partition_name_to_vals_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype477, _size474) = iprot.readListBegin()
- for _i478 in xrange(_size474):
- _elem479 = iprot.readString();
- self.success.append(_elem479)
+ (_etype486, _size483) = iprot.readListBegin()
+ for _i487 in xrange(_size483):
+ _elem488 = iprot.readString();
+ self.success.append(_elem488)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -14041,8 +14349,8 @@ class partition_name_to_vals_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter480 in self.success:
- oprot.writeString(iter480)
+ for iter489 in self.success:
+ oprot.writeString(iter489)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -14155,11 +14463,11 @@ class partition_name_to_spec_result:
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype482, _vtype483, _size481 ) = iprot.readMapBegin()
- for _i485 in xrange(_size481):
- _key486 = iprot.readString();
- _val487 = iprot.readString();
- self.success[_key486] = _val487
+ (_ktype491, _vtype492, _size490 ) = iprot.readMapBegin()
+ for _i494 in xrange(_size490):
+ _key495 = iprot.readString();
+ _val496 = iprot.readString();
+ self.success[_key495] = _val496
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -14182,9 +14490,9 @@ class partition_name_to_spec_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
- for kiter488,viter489 in self.success.items():
- oprot.writeString(kiter488)
- oprot.writeString(viter489)
+ for kiter497,viter498 in self.success.items():
+ oprot.writeString(kiter497)
+ oprot.writeString(viter498)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -14254,11 +14562,11 @@ class markPartitionForEvent_args:
elif fid == 3:
if ftype == TType.MAP:
self.part_vals = {}
- (_ktype491, _vtype492, _size490 ) = iprot.readMapBegin()
- for _i494 in xrange(_size490):
- _key495 = iprot.readString();
- _val496 = iprot.readString();
- self.part_vals[_key495] = _val496
+ (_ktype500, _vtype501, _size499 ) = iprot.readMapBegin()
+ for _i503 in xrange(_size499):
+ _key504 = iprot.readString();
+ _val505 = iprot.readString();
+ self.part_vals[_key504] = _val505
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -14288,9 +14596,9 @@ class markPartitionForEvent_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
- for kiter497,viter498 in self.part_vals.items():
- oprot.writeString(kiter497)
- oprot.writeString(viter498)
+ for kiter506,viter507 in self.part_vals.items():
+ oprot.writeString(kiter506)
+ oprot.writeString(viter507)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.eventType is not None:
@@ -14486,11 +14794,11 @@ class isPartitionMarkedForEvent_args:
elif fid == 3:
if ftype == TType.MAP:
self.part_vals = {}
- (_ktype500, _vtype501, _size499 ) = iprot.readMapBegin()
- for _i503 in xrange(_size499):
- _key504 = iprot.readString();
- _val505 = iprot.readString();
- self.part_vals[_key504] = _val505
+ (_ktype509, _vtype510, _size508 ) = iprot.readMapBegin()
+ for _i512 in xrange(_size508):
+ _key513 = iprot.readString();
+ _val514 = iprot.readString();
+ self.part_vals[_key513] = _val514
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -14520,9 +14828,9 @@ class isPartitionMarkedForEvent_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
- for kiter506,viter507 in self.part_vals.items():
- oprot.writeString(kiter506)
- oprot.writeString(viter507)
+ for kiter515,viter516 in self.part_vals.items():
+ oprot.writeString(kiter515)
+ oprot.writeString(viter516)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.eventType is not None:
@@ -15494,11 +15802,11 @@ class get_indexes_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype511, _size508) = iprot.readListBegin()
- for _i512 in xrange(_size508):
- _elem513 = Index()
- _elem513.read(iprot)
- self.success.append(_elem513)
+ (_etype520, _size517) = iprot.readListBegin()
+ for _i521 in xrange(_size517):
+ _elem522 = Index()
+ _elem522.read(iprot)
+ self.success.append(_elem522)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -15527,8 +15835,8 @@ class get_indexes_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter514 in self.success:
- iter514.write(oprot)
+ for iter523 in self.success:
+ iter523.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -15669,10 +15977,10 @@ class get_index_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype518, _size515) = iprot.readListBegin()
- for _i519 in xrange(_size515):
- _elem520 = iprot.readString();
- self.success.append(_elem520)
+ (_etype527, _size524) = iprot.readListBegin()
+ for _i528 in xrange(_size524):
+ _elem529 = iprot.readString();
+ self.success.append(_elem529)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -15695,8 +16003,8 @@ class get_index_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter521 in self.success:
- oprot.writeString(iter521)
+ for iter530 in self.success:
+ oprot.writeString(iter530)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o2 is not None:
@@ -17206,10 +17514,10 @@ class get_role_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype525, _size522) = iprot.readListBegin()
- for _i526 in xrange(_size522):
- _elem527 = iprot.readString();
- self.success.append(_elem527)
+ (_etype534, _size531) = iprot.readListBegin()
+ for _i535 in xrange(_size531):
+ _elem536 = iprot.readString();
+ self.success.append(_elem536)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17232,8 +17540,8 @@ class get_role_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter528 in self.success:
- oprot.writeString(iter528)
+ for iter537 in self.success:
+ oprot.writeString(iter537)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -17706,11 +18014,11 @@ class list_roles_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype532, _size529) = iprot.readListBegin()
- for _i533 in xrange(_size529):
- _elem534 = Role()
- _elem534.read(iprot)
- self.success.append(_elem534)
+ (_etype541, _size538) = iprot.readListBegin()
+ for _i542 in xrange(_size538):
+ _elem543 = Role()
+ _elem543.read(iprot)
+ self.success.append(_elem543)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17733,8 +18041,8 @@ class list_roles_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter535 in self.success:
- iter535.write(oprot)
+ for iter544 in self.success:
+ iter544.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -17802,10 +18110,10 @@ class get_privilege_set_args:
elif fid == 3:
if ftype == TType.LIST:
self.group_names = []
- (_etype539, _size536) = iprot.readListBegin()
- for _i540 in xrange(_size536):
- _elem541 = iprot.readString();
- self.group_names.append(_elem541)
+ (_etype548, _size545) = iprot.readListBegin()
+ for _i549 in xrange(_size545):
+ _elem550 = iprot.readString();
+ self.group_names.append(_elem550)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17830,8 +18138,8 @@ class get_privilege_set_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter542 in self.group_names:
- oprot.writeString(iter542)
+ for iter551 in self.group_names:
+ oprot.writeString(iter551)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -18038,11 +18346,11 @@ class list_privileges_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype546, _size543) = iprot.readListBegin()
- for _i547 in xrange(_size543):
- _elem548 = HiveObjectPrivilege()
- _elem548.read(iprot)
- self.success.append(_elem548)
+ (_etype555, _size552) = iprot.readListBegin()
+ for _i556 in xrange(_size552):
+ _elem557 = HiveObjectPrivilege()
+ _elem557.read(iprot)
+ self.success.append(_elem557)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -18065,8 +18373,8 @@ class list_privileges_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter549 in self.success:
- iter549.write(oprot)
+ for iter558 in self.success:
+ iter558.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -18391,10 +18699,10 @@ class set_ugi_args:
elif fid == 2:
if ftype == TType.LIST:
self.group_names = []
- (_etype553, _size550) = iprot.readListBegin()
- for _i554 in xrange(_size550):
- _elem555 = iprot.readString();
- self.group_names.append(_elem555)
+ (_etype562, _size559) = iprot.readListBegin()
+ for _i563 in xrange(_size559):
+ _elem564 = iprot.readString();
+ self.group_names.append(_elem564)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -18415,8 +18723,8 @@ class set_ugi_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter556 in self.group_names:
- oprot.writeString(iter556)
+ for iter565 in self.group_names:
+ oprot.writeString(iter565)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -18465,10 +18773,10 @@ class set_ugi_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype560, _size557) = iprot.readListBegin()
- for _i561 in xrange(_size557):
- _elem562 = iprot.readString();
- self.success.append(_elem562)
+ (_etype569, _size566) = iprot.readListBegin()
+ for _i570 in xrange(_size566):
+ _elem571 = iprot.readString();
+ self.success.append(_elem571)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -18491,8 +18799,8 @@ class set_ugi_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter563 in self.success:
- oprot.writeString(iter563)
+ for iter572 in self.success:
+ oprot.writeString(iter572)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Thu Apr 25 12:24:28 2013
@@ -611,6 +611,25 @@ module ThriftHiveMetastore
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition failed: unknown result')
end
+ def exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ return recv_exchange_partition()
+ end
+
+ def send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ send_message('exchange_partition', Exchange_partition_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name)
+ end
+
+ def recv_exchange_partition()
+ result = receive_message(Exchange_partition_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise result.o4 unless result.o4.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partition failed: unknown result')
+ end
+
def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
return recv_get_partition_with_auth()
@@ -1873,6 +1892,23 @@ module ThriftHiveMetastore
write_result(result, oprot, 'get_partition', seqid)
end
+ def process_exchange_partition(seqid, iprot, oprot)
+ args = read_args(iprot, Exchange_partition_args)
+ result = Exchange_partition_result.new()
+ begin
+ result.success = @handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name)
+ rescue ::MetaException => o1
+ result.o1 = o1
+ rescue ::NoSuchObjectException => o2
+ result.o2 = o2
+ rescue ::InvalidObjectException => o3
+ result.o3 = o3
+ rescue ::InvalidInputException => o4
+ result.o4 = o4
+ end
+ write_result(result, oprot, 'exchange_partition', seqid)
+ end
+
def process_get_partition_with_auth(seqid, iprot, oprot)
args = read_args(iprot, Get_partition_with_auth_args)
result = Get_partition_with_auth_result.new()
@@ -3840,6 +3876,54 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Exchange_partition_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ PARTITIONSPECS = 1
+ SOURCE_DB = 2
+ SOURCE_TABLE_NAME = 3
+ DEST_DB = 4
+ DEST_TABLE_NAME = 5
+
+ FIELDS = {
+ PARTITIONSPECS => {:type => ::Thrift::Types::MAP, :name => 'partitionSpecs', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+ SOURCE_DB => {:type => ::Thrift::Types::STRING, :name => 'source_db'},
+ SOURCE_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'source_table_name'},
+ DEST_DB => {:type => ::Thrift::Types::STRING, :name => 'dest_db'},
+ DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Exchange_partition_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+ O4 = 4
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::Partition},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+ O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Get_partition_with_auth_args
include ::Thrift::Struct, ::Thrift::Struct_Union
DB_NAME = 1
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Apr 25 12:24:28 2013
@@ -1863,6 +1863,73 @@ public class HiveMetaStore extends Thrif
return ret;
}
+ @Override
+ public Partition exchange_partition(Map<String, String> partitionSpecs,
+ String sourceDbName, String sourceTableName, String destDbName,
+ String destTableName) throws MetaException, NoSuchObjectException,
+ InvalidObjectException, InvalidInputException, TException {
+ boolean success = false;
+ boolean pathCreated = false;
+ RawStore ms = getMS();
+ ms.openTransaction();
+ Table destinationTable = ms.getTable(destDbName, destTableName);
+ Table sourceTable = ms.getTable(sourceDbName, sourceTableName);
+ List<String> partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(),
+ partitionSpecs);
+ List<String> partValsPresent = new ArrayList<String> ();
+ List<FieldSchema> partitionKeysPresent = new ArrayList<FieldSchema> ();
+ int i = 0;
+ for (FieldSchema fs: sourceTable.getPartitionKeys()) {
+ String partVal = partVals.get(i);
+ if (partVal != null && !partVal.equals("")) {
+ partValsPresent.add(partVal);
+ partitionKeysPresent.add(fs);
+ }
+ i++;
+ }
+ List<Partition> partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName,
+ partVals, (short)-1);
+ boolean sameColumns = MetaStoreUtils.compareFieldColumns(
+ sourceTable.getSd().getCols(), destinationTable.getSd().getCols());
+ boolean samePartitions = MetaStoreUtils.compareFieldColumns(
+ sourceTable.getPartitionKeys(), destinationTable.getPartitionKeys());
+ if (!sameColumns || !samePartitions) {
+ throw new MetaException("The tables have different schemas." +
+ " Their partitions cannot be exchanged.");
+ }
+ Path sourcePath = new Path(sourceTable.getSd().getLocation(),
+ Warehouse.makePartName(partitionKeysPresent, partValsPresent));
+ Path destPath = new Path(destinationTable.getSd().getLocation(),
+ Warehouse.makePartName(partitionKeysPresent, partValsPresent));
+ try {
+ for (Partition partition: partitionsToExchange) {
+ Partition destPartition = new Partition(partition);
+ destPartition.setDbName(destDbName);
+ destPartition.setTableName(destinationTable.getTableName());
+ Path destPartitionPath = new Path(destinationTable.getSd().getLocation(),
+ Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues()));
+ destPartition.getSd().setLocation(destPartitionPath.toString());
+ ms.addPartition(destPartition);
+ ms.dropPartition(partition.getDbName(), sourceTable.getTableName(),
+ partition.getValues());
+ }
+ /**
+ * TODO: Use the hard link feature of hdfs
+ * once https://issues.apache.org/jira/browse/HDFS-3370 is done
+ */
+ pathCreated = wh.renameDir(sourcePath, destPath);
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ if (pathCreated) {
+ wh.renameDir(destPath, sourcePath);
+ }
+ }
+ }
+ return new Partition();
+ }
+
private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name,
List<String> part_vals, final boolean deleteData, final EnvironmentContext envContext)
throws MetaException, NoSuchObjectException, IOException, InvalidObjectException,
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Thu Apr 25 12:24:28 2013
@@ -424,6 +424,22 @@ public class HiveMetaStoreClient impleme
partName, envContext));
}
+ /**
+ * Exchange the partition between two tables
+ * @param partitionSpecs partition specs of the parent partition to be exchanged
+ * @param destDb the db of the destination table
+ * @param destinationTableName the destination table name
+ * @return new partition after exchanging
+ */
+ @Override
+ public Partition exchange_partition(Map<String, String> partitionSpecs,
+ String sourceDb, String sourceTable, String destDb,
+ String destinationTableName) throws MetaException,
+ NoSuchObjectException, InvalidObjectException, TException {
+ return client.exchange_partition(partitionSpecs, sourceDb, sourceTable,
+ destDb, destinationTableName);
+ }
+
public void validatePartitionNameCharacters(List<String> partVals)
throws TException, MetaException {
client.partition_name_has_valid_characters(partVals, true);
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Thu Apr 25 12:24:28 2013
@@ -347,6 +347,17 @@ public interface IMetaStoreClient {
List<String> partVals) throws NoSuchObjectException, MetaException, TException;
/**
+ * @param partitionSpecs
+ * @param sourceDb
+ * @param sourceTable
+ * @param destdb
+ * @param destTableName
+ * @return partition object
+ */
+ public Partition exchange_partition(Map<String, String> partitionSpecs,
+ String sourceDb, String sourceTable, String destdb,
+ String destTableName) throws MetaException, NoSuchObjectException,
+ InvalidObjectException, TException;
+
+ /**
* @param dbName
* @param tblName
* @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Apr 25 12:24:28 2013
@@ -308,6 +308,28 @@ public class MetaStoreUtils {
}
/**
+ * Given a list of partition columns and a partial mapping from
+ * some partition columns to values, the function returns the values
+ * for those columns.
+ * @param partCols the list of table partition columns
+ * @param partSpec the partial mapping from partition column to values
+ * @return list of values for the given partition columns; any missing
+ * value in partSpec is replaced by an empty string
+ */
+ public static List<String> getPvals(List<FieldSchema> partCols,
+ Map<String, String> partSpec) {
+ List<String> pvals = new ArrayList<String>();
+ for (FieldSchema field : partCols) {
+ String val = partSpec.get(field.getName());
+ if (val == null) {
+ val = "";
+ }
+ pvals.add(val);
+ }
+ return pvals;
+ }
+
+ /**
* validateName
*
* Checks the name conforms to our standards which are: "[a-zA-z_0-9]+". checks
@@ -1158,6 +1180,39 @@ public class MetaStoreUtils {
return getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null;
}
+ /**
+ * @param schema1 the first schema to be compared
+ * @param schema2 the second schema to be compared
+ * @return true if the two schemas are the same, else false;
+ * when comparing fields the comment is ignored
+ */
+ public static boolean compareFieldColumns(List<FieldSchema> schema1, List<FieldSchema> schema2) {
+ if (schema1.size() != schema2.size()) {
+ return false;
+ }
+ for (int i = 0; i < schema1.size(); i++) {
+ FieldSchema f1 = schema1.get(i);
+ FieldSchema f2 = schema2.get(i);
+ // The default equals provided by thrift compares the comments too for
+ // equality, thus we need to compare the relevant fields here.
+ if (f1.getName() == null) {
+ if (f2.getName() != null) {
+ return false;
+ }
+ } else if (!f1.getName().equals(f2.getName())) {
+ return false;
+ }
+ if (f1.getType() == null) {
+ if (f2.getType() != null) {
+ return false;
+ }
+ } else if (!f1.getType().equals(f2.getType())) {
+ return false;
+ }
+ }
+ return true;
+ }
+
private static String getPartitionValWithInvalidCharacter(List<String> partVals,
Pattern partitionValidationPattern) {
if (partitionValidationPattern == null) {
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Thu Apr 25 12:24:28 2013
@@ -204,6 +204,18 @@ public class Warehouse {
return false;
}
+ public boolean renameDir(Path sourcePath, Path destPath) throws MetaException {
+ FileSystem fs = null;
+ try {
+ fs = getFs(sourcePath);
+ fs.rename(sourcePath, destPath);
+ return true;
+ } catch (Exception ex) {
+ MetaStoreUtils.logAndThrowMetaException(ex);
+ }
+ return false;
+ }
+
public boolean deleteDir(Path f, boolean recursive) throws MetaException {
FileSystem fs = getFs(f);
return fsHandler.deleteDir(fs, f, recursive, conf);
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Thu Apr 25 12:24:28 2013
@@ -301,7 +301,7 @@ public enum ErrorMsg {
"Cannot ALTER VIEW AS SELECT if view currently does not exist\n"),
REPLACE_VIEW_WITH_PARTITION(10217,
"Cannot replace a view with CREATE VIEW or REPLACE VIEW or " +
- "ALTER VIEW AS SELECT if the view has paritions\n"),
+ "ALTER VIEW AS SELECT if the view has partitions\n"),
EXISTING_TABLE_IS_NOT_VIEW(10218,
"Existing table is not a view\n"),
NO_SUPPORTED_ORDERBY_ALLCOLREF_POS(10219,
@@ -345,6 +345,10 @@ public enum ErrorMsg {
+ "fails to construct aggregation for the partition "),
ANALYZE_TABLE_PARTIALSCAN_AUTOGATHER(10233, "Analyze partialscan is not allowed " +
"if hive.stats.autogather is set to false"),
+ PARTITION_VALUE_NOT_CONTINUOUS(10234, "Parition values specifed are not continuous." +
+ " A subpartition value is specified without specififying the parent partition's value"),
+ TABLES_INCOMPATIBLE_SCHEMAS(10235, "Tables have incompatible schemas and their partitions " +
+ " cannot be exchanged."),
SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Apr 25 12:24:28 2013
@@ -114,6 +114,7 @@ import org.apache.hadoop.hive.ql.plan.Al
import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
+import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
@@ -431,6 +432,12 @@ public class DDLTask extends Task<DDLWor
return truncateTable(db, truncateTableDesc);
}
+ AlterTableExchangePartition alterTableExchangePartition =
+ work.getAlterTableExchangePartition();
+ if (alterTableExchangePartition != null) {
+ return exchangeTablePartition(db, alterTableExchangePartition);
+ }
+
} catch (InvalidTableException e) {
formatter.consoleError(console, "Table " + e.getTableName() + " does not exist",
formatter.MISSING);
@@ -3986,6 +3993,17 @@ public class DDLTask extends Task<DDLWor
return 0;
}
+ private int exchangeTablePartition(Hive db,
+ AlterTableExchangePartition exchangePartition) throws HiveException {
+ Map<String, String> partitionSpecs = exchangePartition.getPartitionSpecs();
+ Table destTable = exchangePartition.getDestinationTable();
+ Table sourceTable = exchangePartition.getSourceTable();
+ db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(),
+ sourceTable.getTableName(),destTable.getDbName(),
+ destTable.getTableName());
+ return 0;
+ }
+
private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec)
throws HiveException {
List<Path> locations = new ArrayList<Path>();
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Thu Apr 25 12:24:28 2013
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.Context;
@@ -46,7 +47,6 @@ import org.apache.hadoop.hive.ql.io.rcfi
import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
-import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -394,7 +394,8 @@ public class MoveTask extends Task<MoveW
}
dc = null; // reset data container to prevent it being added again.
} else { // static partitions
- List<String> partVals = Hive.getPvals(table.getPartCols(), tbd.getPartitionSpec());
+ List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(),
+ tbd.getPartitionSpec());
db.validatePartitionNameCharacters(partVals);
db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Apr 25 12:24:28 2013
@@ -1671,7 +1671,7 @@ private void constructOneLBLocationMap(F
List<String> names = null;
Table t = getTable(dbName, tblName);
- List<String> pvals = getPvals(t.getPartCols(), partSpec);
+ List<String> pvals = MetaStoreUtils.getPvals(t.getPartCols(), partSpec);
try {
names = getMSC().listPartitionNames(dbName, tblName, pvals, max);
@@ -1713,19 +1713,6 @@ private void constructOneLBLocationMap(F
}
}
- public static List<String> getPvals(List<FieldSchema> partCols,
- Map<String, String> partSpec) {
- List<String> pvals = new ArrayList<String>();
- for (FieldSchema field : partCols) {
- String val = partSpec.get(field.getName());
- if (val == null) {
- val = "";
- }
- pvals.add(val);
- }
- return pvals;
- }
-
/**
* get all the partitions of the table that matches the given partial
* specification. partition columns whose value is can be anything should be
@@ -1745,7 +1732,7 @@ private void constructOneLBLocationMap(F
"partitioned table");
}
- List<String> partialPvals = getPvals(tbl.getPartCols(), partialPartSpec);
+ List<String> partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec);
List<org.apache.hadoop.hive.metastore.api.Partition> partitions = null;
try {
@@ -2251,6 +2238,18 @@ private void constructOneLBLocationMap(F
}
}
+ public void exchangeTablePartitions(Map<String, String> partitionSpecs,
+ String sourceDb, String sourceTable, String destDb,
+ String destinationTableName) throws HiveException {
+ try {
+ getMSC().exchange_partition(partitionSpecs, sourceDb, sourceTable, destDb,
+ destinationTableName);
+ } catch (Exception ex) {
+ LOG.error(StringUtils.stringifyException(ex));
+ throw new HiveException(ex);
+ }
+ }
+
/**
* Creates a metastore client. Currently it creates only JDBC based client as
* File based store support is removed
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Thu Apr 25 12:24:28 2013
@@ -43,6 +43,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -75,8 +76,10 @@ import org.apache.hadoop.hive.ql.plan.Ad
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
@@ -126,7 +129,6 @@ import org.apache.hadoop.hive.serde.serd
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
-import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
/**
* DDLSemanticAnalyzer.
@@ -405,6 +407,9 @@ public class DDLSemanticAnalyzer extends
case HiveParser.TOK_ALTERTABLE_SKEWED:
analyzeAltertableSkewedby(ast);
break;
+ case HiveParser.TOK_EXCHANGEPARTITION:
+ analyzeExchangePartition(ast);
+ break;
default:
throw new SemanticException("Unsupported command.");
}
@@ -663,6 +668,69 @@ public class DDLSemanticAnalyzer extends
}
+ private void analyzeExchangePartition(ASTNode ast) throws SemanticException {
+ Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(0)));
+ Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(2)));
+
+ // Get the partition specs
+ Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(1));
+ validatePartitionValues(partSpecs);
+ boolean sameColumns = MetaStoreUtils.compareFieldColumns(
+ sourceTable.getAllCols(), destTable.getAllCols());
+ boolean samePartitions = MetaStoreUtils.compareFieldColumns(
+ sourceTable.getPartitionKeys(), destTable.getPartitionKeys());
+ if (!sameColumns || !samePartitions) {
+ throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
+ }
+ List<Partition> partitions = getPartitions(sourceTable, partSpecs, true);
+
+ // Verify that the partitions specified are continuous
+ // If a subpartition value is specified without specifying a partition's value
+ // then we throw an exception
+ if (!isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs)) {
+ throw new SemanticException(
+ ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
+ }
+ List<Partition> destPartitions = null;
+ try {
+ destPartitions = getPartitions(destTable, partSpecs, true);
+ } catch (SemanticException ex) {
+ // We should expect a semantic exception being thrown as this partition
+ // should not be present.
+ }
+ if (destPartitions != null) {
+ // If any destination partition is present then throw a Semantic Exception.
+ throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
+ }
+ AlterTableExchangePartition alterTableExchangePartition =
+ new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+ alterTableExchangePartition), conf));
+ }
+
+ /**
+ * @param partitionKeys the list of partition keys of the table
+ * @param partSpecs the partition specs given by the user
+ * @return true if no subpartition value is specified without its parent
+ * partition's value also being specified, false otherwise
+ */
+ private boolean isPartitionValueContinuous(List<FieldSchema> partitionKeys,
+ Map<String, String> partSpecs) {
+ boolean partitionMissing = false;
+ for (FieldSchema partitionKey: partitionKeys) {
+ if (!partSpecs.containsKey(partitionKey.getName())) {
+ partitionMissing = true;
+ } else {
+ if (partitionMissing) {
+ // A subpartition value exists after a missing partition
+ // The partition values specified are not continuous, return false
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
String dbName = unescapeIdentifier(ast.getChild(0).getText());
boolean ifNotExists = false;
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Thu Apr 25 12:24:28 2013
@@ -258,6 +258,7 @@ KW_PARTIALSCAN: 'PARTIALSCAN';
KW_USER: 'USER';
KW_ROLE: 'ROLE';
KW_INNER: 'INNER';
+KW_EXCHANGE: 'EXCHANGE';
// Operators
// NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Thu Apr 25 12:24:28 2013
@@ -295,6 +295,7 @@ TOK_WINDOWSPEC;
TOK_WINDOWVALUES;
TOK_WINDOWRANGE;
TOK_IGNOREPROTECTION;
+TOK_EXCHANGEPARTITION;
}
@@ -867,6 +868,7 @@ alterTableStatementSuffix
| alterStatementSuffixProperties
| alterTblPartitionStatement
| alterStatementSuffixSkewedby
+ | alterStatementSuffixExchangePartition
;
alterViewStatementSuffix
@@ -1103,6 +1105,13 @@ alterStatementSuffixSkewedby
->^(TOK_ALTERTABLE_SKEWED $name storedAsDirs)
;
+alterStatementSuffixExchangePartition
+@init {msgs.push("alter exchange partition");}
+@after{msgs.pop();}
+ : name=tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
+ -> ^(TOK_EXCHANGEPARTITION $name partitionSpec $exchangename)
+ ;
+
alterStatementSuffixProtectMode
@init { msgs.push("alter partition protect mode statement"); }
@after { msgs.pop(); }
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Thu Apr 25 12:24:28 2013
@@ -202,6 +202,7 @@ public final class SemanticAnalyzerFacto
case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
case HiveParser.TOK_ALTERTABLE_SKEWED:
case HiveParser.TOK_TRUNCATETABLE:
+ case HiveParser.TOK_EXCHANGEPARTITION:
return new DDLSemanticAnalyzer(conf);
case HiveParser.TOK_ALTERTABLE_PARTITION:
HiveOperation commandType = null;
Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java Thu Apr 25 12:24:28 2013
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+public class AlterTableExchangePartition extends DDLDesc {
+
+ // The source table
+ private Table sourceTable;
+
+ // The destination table
+ private Table destinationTable;
+
+ // The partition that has to be exchanged
+ private Map<String, String> partitionSpecs;
+
+ public AlterTableExchangePartition(Table sourceTable, Table destinationTable,
+ Map<String, String> partitionSpecs) {
+ super();
+ this.sourceTable = sourceTable;
+ this.destinationTable = destinationTable;
+ this.partitionSpecs = partitionSpecs;
+ }
+
+ public void setSourceTable(Table sourceTable) {
+ this.sourceTable = sourceTable;
+ }
+
+ public Table getSourceTable() {
+ return this.sourceTable;
+ }
+
+ public void setDestinationTable(Table destinationTable) {
+ this.destinationTable = destinationTable;
+ }
+
+ public Table getDestinationTable() {
+ return this.destinationTable;
+ }
+
+ public void setPartitionSpecs(Map<String, String> partitionSpecs) {
+ this.partitionSpecs = partitionSpecs;
+ }
+
+ public Map<String, String> getPartitionSpecs() {
+ return this.partitionSpecs;
+ }
+}
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java?rev=1475722&r1=1475721&r2=1475722&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java Thu Apr 25 12:24:28 2013
@@ -64,6 +64,7 @@ public class DDLWork implements Serializ
private AlterDatabaseDesc alterDbDesc;
private AlterTableAlterPartDesc alterTableAlterPartDesc;
private TruncateTableDesc truncateTblDesc;
+ private AlterTableExchangePartition alterTableExchangePartition;
private RoleDDLDesc roleDDLDesc;
private GrantDesc grantDesc;
@@ -449,6 +450,12 @@ public class DDLWork implements Serializ
this.alterTableAlterPartDesc = alterPartDesc;
}
+ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+ AlterTableExchangePartition alterTableExchangePartition) {
+ this(inputs, outputs);
+ this.alterTableExchangePartition = alterTableExchangePartition;
+ }
+
/**
* @return Create Database descriptor
*/
@@ -1025,4 +1032,20 @@ public class DDLWork implements Serializ
public void setTruncateTblDesc(TruncateTableDesc truncateTblDesc) {
this.truncateTblDesc = truncateTblDesc;
}
+
+ /**
+ * @return information about the table partition to be exchanged
+ */
+ public AlterTableExchangePartition getAlterTableExchangePartition() {
+ return this.alterTableExchangePartition;
+ }
+
+ /**
+ * @param alterTableExchangePartition
+ * set the value of the table partition to be exchanged
+ */
+ public void setAlterTableExchangePartition(
+ AlterTableExchangePartition alterTableExchangePartition) {
+ this.alterTableExchangePartition = alterTableExchangePartition;
+ }
}
Added: hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q Thu Apr 25 12:24:28 2013
@@ -0,0 +1,12 @@
+CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='h1');
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='h2');
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+-- for exchange_part_test1 the value of ds is not given and the value of hr is given, thus this query will fail
+alter table exchange_part_test1 exchange partition (hr='h1') with table exchange_part_test2;
Added: hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q Thu Apr 25 12:24:28 2013
@@ -0,0 +1,12 @@
+CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING);
+CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING);
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05');
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+-- exchange_part_test2 table partition (ds='2013-04-05') already exists thus this query will fail
+alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
Added: hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q Thu Apr 25 12:24:28 2013
@@ -0,0 +1,13 @@
+CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='1');
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='2');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='3');
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+-- exchange_part_test2 table partition (ds='2013-04-05', hr='3') already exists thus this query will fail
+alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
Added: hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q Thu Apr 25 12:24:28 2013
@@ -0,0 +1,13 @@
+CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='1');
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='2');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1');
+SHOW PARTITIONS exchange_part_test1;
+SHOW PARTITIONS exchange_part_test2;
+
+-- exchange_part_test2 table partition (ds='2013-04-05', hr='1') already exists thus this query will fail
+alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
Added: hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q Thu Apr 25 12:24:28 2013
@@ -0,0 +1,6 @@
+CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING);
+CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING);
+SHOW PARTITIONS exchange_part_test1;
+
+-- exchange_part_test1 partition (ds='2013-04-05') does not exist thus this query will fail
+alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
Added: hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_table_missing.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_table_missing.q?rev=1475722&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_table_missing.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/exchange_partition_neg_table_missing.q Thu Apr 25 12:24:28 2013
@@ -0,0 +1,2 @@
+-- t1 does not exist and the query fails
+alter table t1 exchange partition (ds='2013-04-05') with table t2;