Posted to commits@hive.apache.org by ga...@apache.org on 2014/09/05 19:52:34 UTC

svn commit: r1622748 [12/13] - in /hive/trunk/metastore: if/ src/gen/thrift/gen-cpp/ src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ src/gen/thrift/gen-php/metastore/ src/gen/thrift/gen-py/hive_metastore/ src/gen/thrift/gen-rb/ src/ja...

Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py Fri Sep  5 17:52:32 2014
@@ -2889,6 +2889,406 @@ class Partition:
   def __ne__(self, other):
     return not (self == other)
 
+class PartitionWithoutSD:
+  """
+  Attributes:
+   - values
+   - createTime
+   - lastAccessTime
+   - relativePath
+   - parameters
+   - privileges
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'values', (TType.STRING,None), None, ), # 1
+    (2, TType.I32, 'createTime', None, None, ), # 2
+    (3, TType.I32, 'lastAccessTime', None, None, ), # 3
+    (4, TType.STRING, 'relativePath', None, None, ), # 4
+    (5, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 5
+    (6, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 6
+  )
+
+  def __init__(self, values=None, createTime=None, lastAccessTime=None, relativePath=None, parameters=None, privileges=None,):
+    self.values = values
+    self.createTime = createTime
+    self.lastAccessTime = lastAccessTime
+    self.relativePath = relativePath
+    self.parameters = parameters
+    self.privileges = privileges
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.values = []
+          (_etype203, _size200) = iprot.readListBegin()
+          for _i204 in xrange(_size200):
+            _elem205 = iprot.readString();
+            self.values.append(_elem205)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.createTime = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.lastAccessTime = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.relativePath = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.MAP:
+          self.parameters = {}
+          (_ktype207, _vtype208, _size206 ) = iprot.readMapBegin() 
+          for _i210 in xrange(_size206):
+            _key211 = iprot.readString();
+            _val212 = iprot.readString();
+            self.parameters[_key211] = _val212
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRUCT:
+          self.privileges = PrincipalPrivilegeSet()
+          self.privileges.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionWithoutSD')
+    if self.values is not None:
+      oprot.writeFieldBegin('values', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRING, len(self.values))
+      for iter213 in self.values:
+        oprot.writeString(iter213)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.createTime is not None:
+      oprot.writeFieldBegin('createTime', TType.I32, 2)
+      oprot.writeI32(self.createTime)
+      oprot.writeFieldEnd()
+    if self.lastAccessTime is not None:
+      oprot.writeFieldBegin('lastAccessTime', TType.I32, 3)
+      oprot.writeI32(self.lastAccessTime)
+      oprot.writeFieldEnd()
+    if self.relativePath is not None:
+      oprot.writeFieldBegin('relativePath', TType.STRING, 4)
+      oprot.writeString(self.relativePath)
+      oprot.writeFieldEnd()
+    if self.parameters is not None:
+      oprot.writeFieldBegin('parameters', TType.MAP, 5)
+      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
+      for kiter214,viter215 in self.parameters.items():
+        oprot.writeString(kiter214)
+        oprot.writeString(viter215)
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
+    if self.privileges is not None:
+      oprot.writeFieldBegin('privileges', TType.STRUCT, 6)
+      self.privileges.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class PartitionSpecWithSharedSD:
+  """
+  Attributes:
+   - partitions
+   - sd
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'partitions', (TType.STRUCT,(PartitionWithoutSD, PartitionWithoutSD.thrift_spec)), None, ), # 1
+    (2, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, partitions=None, sd=None,):
+    self.partitions = partitions
+    self.sd = sd
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.partitions = []
+          (_etype219, _size216) = iprot.readListBegin()
+          for _i220 in xrange(_size216):
+            _elem221 = PartitionWithoutSD()
+            _elem221.read(iprot)
+            self.partitions.append(_elem221)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.sd = StorageDescriptor()
+          self.sd.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionSpecWithSharedSD')
+    if self.partitions is not None:
+      oprot.writeFieldBegin('partitions', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.partitions))
+      for iter222 in self.partitions:
+        iter222.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.sd is not None:
+      oprot.writeFieldBegin('sd', TType.STRUCT, 2)
+      self.sd.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class PartitionListComposingSpec:
+  """
+  Attributes:
+   - partitions
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
+  )
+
+  def __init__(self, partitions=None,):
+    self.partitions = partitions
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.partitions = []
+          (_etype226, _size223) = iprot.readListBegin()
+          for _i227 in xrange(_size223):
+            _elem228 = Partition()
+            _elem228.read(iprot)
+            self.partitions.append(_elem228)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionListComposingSpec')
+    if self.partitions is not None:
+      oprot.writeFieldBegin('partitions', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.partitions))
+      for iter229 in self.partitions:
+        iter229.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class PartitionSpec:
+  """
+  Attributes:
+   - dbName
+   - tableName
+   - rootPath
+   - sharedSDPartitionSpec
+   - partitionList
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'dbName', None, None, ), # 1
+    (2, TType.STRING, 'tableName', None, None, ), # 2
+    (3, TType.STRING, 'rootPath', None, None, ), # 3
+    (4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4
+    (5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5
+  )
+
+  def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None,):
+    self.dbName = dbName
+    self.tableName = tableName
+    self.rootPath = rootPath
+    self.sharedSDPartitionSpec = sharedSDPartitionSpec
+    self.partitionList = partitionList
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.dbName = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tableName = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.rootPath = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRUCT:
+          self.sharedSDPartitionSpec = PartitionSpecWithSharedSD()
+          self.sharedSDPartitionSpec.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRUCT:
+          self.partitionList = PartitionListComposingSpec()
+          self.partitionList.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionSpec')
+    if self.dbName is not None:
+      oprot.writeFieldBegin('dbName', TType.STRING, 1)
+      oprot.writeString(self.dbName)
+      oprot.writeFieldEnd()
+    if self.tableName is not None:
+      oprot.writeFieldBegin('tableName', TType.STRING, 2)
+      oprot.writeString(self.tableName)
+      oprot.writeFieldEnd()
+    if self.rootPath is not None:
+      oprot.writeFieldBegin('rootPath', TType.STRING, 3)
+      oprot.writeString(self.rootPath)
+      oprot.writeFieldEnd()
+    if self.sharedSDPartitionSpec is not None:
+      oprot.writeFieldBegin('sharedSDPartitionSpec', TType.STRUCT, 4)
+      self.sharedSDPartitionSpec.write(oprot)
+      oprot.writeFieldEnd()
+    if self.partitionList is not None:
+      oprot.writeFieldBegin('partitionList', TType.STRUCT, 5)
+      self.partitionList.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class Index:
   """
   Attributes:
@@ -2983,11 +3383,11 @@ class Index:
       elif fid == 9:
         if ftype == TType.MAP:
           self.parameters = {}
-          (_ktype201, _vtype202, _size200 ) = iprot.readMapBegin() 
-          for _i204 in xrange(_size200):
-            _key205 = iprot.readString();
-            _val206 = iprot.readString();
-            self.parameters[_key205] = _val206
+          (_ktype231, _vtype232, _size230 ) = iprot.readMapBegin() 
+          for _i234 in xrange(_size230):
+            _key235 = iprot.readString();
+            _val236 = iprot.readString();
+            self.parameters[_key235] = _val236
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -3041,9 +3441,9 @@ class Index:
     if self.parameters is not None:
       oprot.writeFieldBegin('parameters', TType.MAP, 9)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
-      for kiter207,viter208 in self.parameters.items():
-        oprot.writeString(kiter207)
-        oprot.writeString(viter208)
+      for kiter237,viter238 in self.parameters.items():
+        oprot.writeString(kiter237)
+        oprot.writeString(viter238)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.deferredRebuild is not None:
@@ -4097,11 +4497,11 @@ class ColumnStatistics:
       elif fid == 2:
         if ftype == TType.LIST:
           self.statsObj = []
-          (_etype212, _size209) = iprot.readListBegin()
-          for _i213 in xrange(_size209):
-            _elem214 = ColumnStatisticsObj()
-            _elem214.read(iprot)
-            self.statsObj.append(_elem214)
+          (_etype242, _size239) = iprot.readListBegin()
+          for _i243 in xrange(_size239):
+            _elem244 = ColumnStatisticsObj()
+            _elem244.read(iprot)
+            self.statsObj.append(_elem244)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4122,8 +4522,8 @@ class ColumnStatistics:
     if self.statsObj is not None:
       oprot.writeFieldBegin('statsObj', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.statsObj))
-      for iter215 in self.statsObj:
-        iter215.write(oprot)
+      for iter245 in self.statsObj:
+        iter245.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4177,11 +4577,11 @@ class AggrStats:
       if fid == 1:
         if ftype == TType.LIST:
           self.colStats = []
-          (_etype219, _size216) = iprot.readListBegin()
-          for _i220 in xrange(_size216):
-            _elem221 = ColumnStatisticsObj()
-            _elem221.read(iprot)
-            self.colStats.append(_elem221)
+          (_etype249, _size246) = iprot.readListBegin()
+          for _i250 in xrange(_size246):
+            _elem251 = ColumnStatisticsObj()
+            _elem251.read(iprot)
+            self.colStats.append(_elem251)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4203,8 +4603,8 @@ class AggrStats:
     if self.colStats is not None:
       oprot.writeFieldBegin('colStats', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.colStats))
-      for iter222 in self.colStats:
-        iter222.write(oprot)
+      for iter252 in self.colStats:
+        iter252.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.partsFound is not None:
@@ -4259,11 +4659,11 @@ class SetPartitionsStatsRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.colStats = []
-          (_etype226, _size223) = iprot.readListBegin()
-          for _i227 in xrange(_size223):
-            _elem228 = ColumnStatistics()
-            _elem228.read(iprot)
-            self.colStats.append(_elem228)
+          (_etype256, _size253) = iprot.readListBegin()
+          for _i257 in xrange(_size253):
+            _elem258 = ColumnStatistics()
+            _elem258.read(iprot)
+            self.colStats.append(_elem258)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4280,8 +4680,8 @@ class SetPartitionsStatsRequest:
     if self.colStats is not None:
       oprot.writeFieldBegin('colStats', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.colStats))
-      for iter229 in self.colStats:
-        iter229.write(oprot)
+      for iter259 in self.colStats:
+        iter259.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4333,22 +4733,22 @@ class Schema:
       if fid == 1:
         if ftype == TType.LIST:
           self.fieldSchemas = []
-          (_etype233, _size230) = iprot.readListBegin()
-          for _i234 in xrange(_size230):
-            _elem235 = FieldSchema()
-            _elem235.read(iprot)
-            self.fieldSchemas.append(_elem235)
+          (_etype263, _size260) = iprot.readListBegin()
+          for _i264 in xrange(_size260):
+            _elem265 = FieldSchema()
+            _elem265.read(iprot)
+            self.fieldSchemas.append(_elem265)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype237, _vtype238, _size236 ) = iprot.readMapBegin() 
-          for _i240 in xrange(_size236):
-            _key241 = iprot.readString();
-            _val242 = iprot.readString();
-            self.properties[_key241] = _val242
+          (_ktype267, _vtype268, _size266 ) = iprot.readMapBegin() 
+          for _i270 in xrange(_size266):
+            _key271 = iprot.readString();
+            _val272 = iprot.readString();
+            self.properties[_key271] = _val272
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4365,16 +4765,16 @@ class Schema:
     if self.fieldSchemas is not None:
       oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas))
-      for iter243 in self.fieldSchemas:
-        iter243.write(oprot)
+      for iter273 in self.fieldSchemas:
+        iter273.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter244,viter245 in self.properties.items():
-        oprot.writeString(kiter244)
-        oprot.writeString(viter245)
+      for kiter274,viter275 in self.properties.items():
+        oprot.writeString(kiter274)
+        oprot.writeString(viter275)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4421,11 +4821,11 @@ class EnvironmentContext:
       if fid == 1:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype247, _vtype248, _size246 ) = iprot.readMapBegin() 
-          for _i250 in xrange(_size246):
-            _key251 = iprot.readString();
-            _val252 = iprot.readString();
-            self.properties[_key251] = _val252
+          (_ktype277, _vtype278, _size276 ) = iprot.readMapBegin() 
+          for _i280 in xrange(_size276):
+            _key281 = iprot.readString();
+            _val282 = iprot.readString();
+            self.properties[_key281] = _val282
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4442,9 +4842,9 @@ class EnvironmentContext:
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter253,viter254 in self.properties.items():
-        oprot.writeString(kiter253)
-        oprot.writeString(viter254)
+      for kiter283,viter284 in self.properties.items():
+        oprot.writeString(kiter283)
+        oprot.writeString(viter284)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4494,11 +4894,11 @@ class PartitionsByExprResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype258, _size255) = iprot.readListBegin()
-          for _i259 in xrange(_size255):
-            _elem260 = Partition()
-            _elem260.read(iprot)
-            self.partitions.append(_elem260)
+          (_etype288, _size285) = iprot.readListBegin()
+          for _i289 in xrange(_size285):
+            _elem290 = Partition()
+            _elem290.read(iprot)
+            self.partitions.append(_elem290)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4520,8 +4920,8 @@ class PartitionsByExprResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter261 in self.partitions:
-        iter261.write(oprot)
+      for iter291 in self.partitions:
+        iter291.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.hasUnknownPartitions is not None:
@@ -4690,11 +5090,11 @@ class TableStatsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.tableStats = []
-          (_etype265, _size262) = iprot.readListBegin()
-          for _i266 in xrange(_size262):
-            _elem267 = ColumnStatisticsObj()
-            _elem267.read(iprot)
-            self.tableStats.append(_elem267)
+          (_etype295, _size292) = iprot.readListBegin()
+          for _i296 in xrange(_size292):
+            _elem297 = ColumnStatisticsObj()
+            _elem297.read(iprot)
+            self.tableStats.append(_elem297)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4711,8 +5111,8 @@ class TableStatsResult:
     if self.tableStats is not None:
       oprot.writeFieldBegin('tableStats', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.tableStats))
-      for iter268 in self.tableStats:
-        iter268.write(oprot)
+      for iter298 in self.tableStats:
+        iter298.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4761,17 +5161,17 @@ class PartitionsStatsResult:
       if fid == 1:
         if ftype == TType.MAP:
           self.partStats = {}
-          (_ktype270, _vtype271, _size269 ) = iprot.readMapBegin() 
-          for _i273 in xrange(_size269):
-            _key274 = iprot.readString();
-            _val275 = []
-            (_etype279, _size276) = iprot.readListBegin()
-            for _i280 in xrange(_size276):
-              _elem281 = ColumnStatisticsObj()
-              _elem281.read(iprot)
-              _val275.append(_elem281)
+          (_ktype300, _vtype301, _size299 ) = iprot.readMapBegin() 
+          for _i303 in xrange(_size299):
+            _key304 = iprot.readString();
+            _val305 = []
+            (_etype309, _size306) = iprot.readListBegin()
+            for _i310 in xrange(_size306):
+              _elem311 = ColumnStatisticsObj()
+              _elem311.read(iprot)
+              _val305.append(_elem311)
             iprot.readListEnd()
-            self.partStats[_key274] = _val275
+            self.partStats[_key304] = _val305
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4788,11 +5188,11 @@ class PartitionsStatsResult:
     if self.partStats is not None:
       oprot.writeFieldBegin('partStats', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats))
-      for kiter282,viter283 in self.partStats.items():
-        oprot.writeString(kiter282)
-        oprot.writeListBegin(TType.STRUCT, len(viter283))
-        for iter284 in viter283:
-          iter284.write(oprot)
+      for kiter312,viter313 in self.partStats.items():
+        oprot.writeString(kiter312)
+        oprot.writeListBegin(TType.STRUCT, len(viter313))
+        for iter314 in viter313:
+          iter314.write(oprot)
         oprot.writeListEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
@@ -4858,10 +5258,10 @@ class TableStatsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.colNames = []
-          (_etype288, _size285) = iprot.readListBegin()
-          for _i289 in xrange(_size285):
-            _elem290 = iprot.readString();
-            self.colNames.append(_elem290)
+          (_etype318, _size315) = iprot.readListBegin()
+          for _i319 in xrange(_size315):
+            _elem320 = iprot.readString();
+            self.colNames.append(_elem320)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4886,8 +5286,8 @@ class TableStatsRequest:
     if self.colNames is not None:
       oprot.writeFieldBegin('colNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.colNames))
-      for iter291 in self.colNames:
-        oprot.writeString(iter291)
+      for iter321 in self.colNames:
+        oprot.writeString(iter321)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4959,20 +5359,20 @@ class PartitionsStatsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.colNames = []
-          (_etype295, _size292) = iprot.readListBegin()
-          for _i296 in xrange(_size292):
-            _elem297 = iprot.readString();
-            self.colNames.append(_elem297)
+          (_etype325, _size322) = iprot.readListBegin()
+          for _i326 in xrange(_size322):
+            _elem327 = iprot.readString();
+            self.colNames.append(_elem327)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype301, _size298) = iprot.readListBegin()
-          for _i302 in xrange(_size298):
-            _elem303 = iprot.readString();
-            self.partNames.append(_elem303)
+          (_etype331, _size328) = iprot.readListBegin()
+          for _i332 in xrange(_size328):
+            _elem333 = iprot.readString();
+            self.partNames.append(_elem333)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4997,15 +5397,15 @@ class PartitionsStatsRequest:
     if self.colNames is not None:
       oprot.writeFieldBegin('colNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.colNames))
-      for iter304 in self.colNames:
-        oprot.writeString(iter304)
+      for iter334 in self.colNames:
+        oprot.writeString(iter334)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 4)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter305 in self.partNames:
-        oprot.writeString(iter305)
+      for iter335 in self.partNames:
+        oprot.writeString(iter335)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5060,11 +5460,11 @@ class AddPartitionsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype309, _size306) = iprot.readListBegin()
-          for _i310 in xrange(_size306):
-            _elem311 = Partition()
-            _elem311.read(iprot)
-            self.partitions.append(_elem311)
+          (_etype339, _size336) = iprot.readListBegin()
+          for _i340 in xrange(_size336):
+            _elem341 = Partition()
+            _elem341.read(iprot)
+            self.partitions.append(_elem341)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5081,8 +5481,8 @@ class AddPartitionsResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter312 in self.partitions:
-        iter312.write(oprot)
+      for iter342 in self.partitions:
+        iter342.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5151,11 +5551,11 @@ class AddPartitionsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.parts = []
-          (_etype316, _size313) = iprot.readListBegin()
-          for _i317 in xrange(_size313):
-            _elem318 = Partition()
-            _elem318.read(iprot)
-            self.parts.append(_elem318)
+          (_etype346, _size343) = iprot.readListBegin()
+          for _i347 in xrange(_size343):
+            _elem348 = Partition()
+            _elem348.read(iprot)
+            self.parts.append(_elem348)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5190,8 +5590,8 @@ class AddPartitionsRequest:
     if self.parts is not None:
       oprot.writeFieldBegin('parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.parts))
-      for iter319 in self.parts:
-        iter319.write(oprot)
+      for iter349 in self.parts:
+        iter349.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.ifNotExists is not None:
@@ -5254,11 +5654,11 @@ class DropPartitionsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype323, _size320) = iprot.readListBegin()
-          for _i324 in xrange(_size320):
-            _elem325 = Partition()
-            _elem325.read(iprot)
-            self.partitions.append(_elem325)
+          (_etype353, _size350) = iprot.readListBegin()
+          for _i354 in xrange(_size350):
+            _elem355 = Partition()
+            _elem355.read(iprot)
+            self.partitions.append(_elem355)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5275,8 +5675,8 @@ class DropPartitionsResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter326 in self.partitions:
-        iter326.write(oprot)
+      for iter356 in self.partitions:
+        iter356.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5400,21 +5800,21 @@ class RequestPartsSpec:
       if fid == 1:
         if ftype == TType.LIST:
           self.names = []
-          (_etype330, _size327) = iprot.readListBegin()
-          for _i331 in xrange(_size327):
-            _elem332 = iprot.readString();
-            self.names.append(_elem332)
+          (_etype360, _size357) = iprot.readListBegin()
+          for _i361 in xrange(_size357):
+            _elem362 = iprot.readString();
+            self.names.append(_elem362)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.LIST:
           self.exprs = []
-          (_etype336, _size333) = iprot.readListBegin()
-          for _i337 in xrange(_size333):
-            _elem338 = DropPartitionsExpr()
-            _elem338.read(iprot)
-            self.exprs.append(_elem338)
+          (_etype366, _size363) = iprot.readListBegin()
+          for _i367 in xrange(_size363):
+            _elem368 = DropPartitionsExpr()
+            _elem368.read(iprot)
+            self.exprs.append(_elem368)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5431,15 +5831,15 @@ class RequestPartsSpec:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter339 in self.names:
-        oprot.writeString(iter339)
+      for iter369 in self.names:
+        oprot.writeString(iter369)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.exprs is not None:
       oprot.writeFieldBegin('exprs', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.exprs))
-      for iter340 in self.exprs:
-        iter340.write(oprot)
+      for iter370 in self.exprs:
+        iter370.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5766,11 +6166,11 @@ class Function:
       elif fid == 8:
         if ftype == TType.LIST:
           self.resourceUris = []
-          (_etype344, _size341) = iprot.readListBegin()
-          for _i345 in xrange(_size341):
-            _elem346 = ResourceUri()
-            _elem346.read(iprot)
-            self.resourceUris.append(_elem346)
+          (_etype374, _size371) = iprot.readListBegin()
+          for _i375 in xrange(_size371):
+            _elem376 = ResourceUri()
+            _elem376.read(iprot)
+            self.resourceUris.append(_elem376)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5815,8 +6215,8 @@ class Function:
     if self.resourceUris is not None:
       oprot.writeFieldBegin('resourceUris', TType.LIST, 8)
       oprot.writeListBegin(TType.STRUCT, len(self.resourceUris))
-      for iter347 in self.resourceUris:
-        iter347.write(oprot)
+      for iter377 in self.resourceUris:
+        iter377.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5975,11 +6375,11 @@ class GetOpenTxnsInfoResponse:
       elif fid == 2:
         if ftype == TType.LIST:
           self.open_txns = []
-          (_etype351, _size348) = iprot.readListBegin()
-          for _i352 in xrange(_size348):
-            _elem353 = TxnInfo()
-            _elem353.read(iprot)
-            self.open_txns.append(_elem353)
+          (_etype381, _size378) = iprot.readListBegin()
+          for _i382 in xrange(_size378):
+            _elem383 = TxnInfo()
+            _elem383.read(iprot)
+            self.open_txns.append(_elem383)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6000,8 +6400,8 @@ class GetOpenTxnsInfoResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.open_txns))
-      for iter354 in self.open_txns:
-        iter354.write(oprot)
+      for iter384 in self.open_txns:
+        iter384.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6060,10 +6460,10 @@ class GetOpenTxnsResponse:
       elif fid == 2:
         if ftype == TType.SET:
           self.open_txns = set()
-          (_etype358, _size355) = iprot.readSetBegin()
-          for _i359 in xrange(_size355):
-            _elem360 = iprot.readI64();
-            self.open_txns.add(_elem360)
+          (_etype388, _size385) = iprot.readSetBegin()
+          for _i389 in xrange(_size385):
+            _elem390 = iprot.readI64();
+            self.open_txns.add(_elem390)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -6084,8 +6484,8 @@ class GetOpenTxnsResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.open_txns))
-      for iter361 in self.open_txns:
-        oprot.writeI64(iter361)
+      for iter391 in self.open_txns:
+        oprot.writeI64(iter391)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6226,10 +6626,10 @@ class OpenTxnsResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.txn_ids = []
-          (_etype365, _size362) = iprot.readListBegin()
-          for _i366 in xrange(_size362):
-            _elem367 = iprot.readI64();
-            self.txn_ids.append(_elem367)
+          (_etype395, _size392) = iprot.readListBegin()
+          for _i396 in xrange(_size392):
+            _elem397 = iprot.readI64();
+            self.txn_ids.append(_elem397)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6246,8 +6646,8 @@ class OpenTxnsResponse:
     if self.txn_ids is not None:
       oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.txn_ids))
-      for iter368 in self.txn_ids:
-        oprot.writeI64(iter368)
+      for iter398 in self.txn_ids:
+        oprot.writeI64(iter398)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6543,11 +6943,11 @@ class LockRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.component = []
-          (_etype372, _size369) = iprot.readListBegin()
-          for _i373 in xrange(_size369):
-            _elem374 = LockComponent()
-            _elem374.read(iprot)
-            self.component.append(_elem374)
+          (_etype402, _size399) = iprot.readListBegin()
+          for _i403 in xrange(_size399):
+            _elem404 = LockComponent()
+            _elem404.read(iprot)
+            self.component.append(_elem404)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6579,8 +6979,8 @@ class LockRequest:
     if self.component is not None:
       oprot.writeFieldBegin('component', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.component))
-      for iter375 in self.component:
-        iter375.write(oprot)
+      for iter405 in self.component:
+        iter405.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.txnid is not None:
@@ -7081,11 +7481,11 @@ class ShowLocksResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.locks = []
-          (_etype379, _size376) = iprot.readListBegin()
-          for _i380 in xrange(_size376):
-            _elem381 = ShowLocksResponseElement()
-            _elem381.read(iprot)
-            self.locks.append(_elem381)
+          (_etype409, _size406) = iprot.readListBegin()
+          for _i410 in xrange(_size406):
+            _elem411 = ShowLocksResponseElement()
+            _elem411.read(iprot)
+            self.locks.append(_elem411)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7102,8 +7502,8 @@ class ShowLocksResponse:
     if self.locks is not None:
       oprot.writeFieldBegin('locks', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.locks))
-      for iter382 in self.locks:
-        iter382.write(oprot)
+      for iter412 in self.locks:
+        iter412.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7301,20 +7701,20 @@ class HeartbeatTxnRangeResponse:
       if fid == 1:
         if ftype == TType.SET:
           self.aborted = set()
-          (_etype386, _size383) = iprot.readSetBegin()
-          for _i387 in xrange(_size383):
-            _elem388 = iprot.readI64();
-            self.aborted.add(_elem388)
+          (_etype416, _size413) = iprot.readSetBegin()
+          for _i417 in xrange(_size413):
+            _elem418 = iprot.readI64();
+            self.aborted.add(_elem418)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.SET:
           self.nosuch = set()
-          (_etype392, _size389) = iprot.readSetBegin()
-          for _i393 in xrange(_size389):
-            _elem394 = iprot.readI64();
-            self.nosuch.add(_elem394)
+          (_etype422, _size419) = iprot.readSetBegin()
+          for _i423 in xrange(_size419):
+            _elem424 = iprot.readI64();
+            self.nosuch.add(_elem424)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -7331,15 +7731,15 @@ class HeartbeatTxnRangeResponse:
     if self.aborted is not None:
       oprot.writeFieldBegin('aborted', TType.SET, 1)
       oprot.writeSetBegin(TType.I64, len(self.aborted))
-      for iter395 in self.aborted:
-        oprot.writeI64(iter395)
+      for iter425 in self.aborted:
+        oprot.writeI64(iter425)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     if self.nosuch is not None:
       oprot.writeFieldBegin('nosuch', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.nosuch))
-      for iter396 in self.nosuch:
-        oprot.writeI64(iter396)
+      for iter426 in self.nosuch:
+        oprot.writeI64(iter426)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7706,11 +8106,11 @@ class ShowCompactResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.compacts = []
-          (_etype400, _size397) = iprot.readListBegin()
-          for _i401 in xrange(_size397):
-            _elem402 = ShowCompactResponseElement()
-            _elem402.read(iprot)
-            self.compacts.append(_elem402)
+          (_etype430, _size427) = iprot.readListBegin()
+          for _i431 in xrange(_size427):
+            _elem432 = ShowCompactResponseElement()
+            _elem432.read(iprot)
+            self.compacts.append(_elem432)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7727,8 +8127,8 @@ class ShowCompactResponse:
     if self.compacts is not None:
       oprot.writeFieldBegin('compacts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-      for iter403 in self.compacts:
-        iter403.write(oprot)
+      for iter433 in self.compacts:
+        iter433.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
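
The Python classes added above mirror the new Thrift structs for partition specs: a PartitionSpecWithSharedSD carries one StorageDescriptor shared by many PartitionWithoutSD entries, while PartitionListComposingSpec wraps full Partition objects. A minimal sketch of constructing them, assuming the generated hive_metastore package is importable; the empty StorageDescriptor and the database/table names are illustrative only (its fields are defined elsewhere in ttypes.py and are not shown in this hunk):

# Sketch: build a PartitionSpec whose partitions share one storage descriptor.
from hive_metastore.ttypes import (PartitionWithoutSD, PartitionSpecWithSharedSD,
                                   PartitionSpec, StorageDescriptor)

part = PartitionWithoutSD(
    values=['2014-09-05'],          # one value per partition column
    createTime=0,
    lastAccessTime=0,
    relativePath='dt=2014-09-05',   # path relative to the spec's rootPath
    parameters={})

shared = PartitionSpecWithSharedSD(partitions=[part], sd=StorageDescriptor())

spec = PartitionSpec(
    dbName='default',               # hypothetical names for illustration
    tableName='web_logs',
    rootPath='/warehouse/web_logs',
    sharedSDPartitionSpec=shared)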

Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb Fri Sep  5 17:52:32 2014
@@ -687,6 +687,90 @@ class Partition
   ::Thrift::Struct.generate_accessors self
 end
 
+class PartitionWithoutSD
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  VALUES = 1
+  CREATETIME = 2
+  LASTACCESSTIME = 3
+  RELATIVEPATH = 4
+  PARAMETERS = 5
+  PRIVILEGES = 6
+
+  FIELDS = {
+    VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}},
+    CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'},
+    LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'},
+    RELATIVEPATH => {:type => ::Thrift::Types::STRING, :name => 'relativePath'},
+    PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+    PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class PartitionSpecWithSharedSD
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  PARTITIONS = 1
+  SD = 2
+
+  FIELDS = {
+    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionWithoutSD}},
+    SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class PartitionListComposingSpec
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  PARTITIONS = 1
+
+  FIELDS = {
+    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class PartitionSpec
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  DBNAME = 1
+  TABLENAME = 2
+  ROOTPATH = 3
+  SHAREDSDPARTITIONSPEC = 4
+  PARTITIONLIST = 5
+
+  FIELDS = {
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+    ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'},
+    SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true},
+    PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class Index
   include ::Thrift::Struct, ::Thrift::Struct_Union
   INDEXNAME = 1
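
The Ruby FIELDS tables above declare the same field ids and types as the Python thrift_spec tuples, so both bindings produce the same binary encoding. A hedged round-trip sketch in Python, assuming only the standard thrift runtime (TMemoryBuffer and TBinaryProtocol):

# Sketch: serialize a PartitionSpec and read it back through TBinaryProtocol.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore.ttypes import PartitionSpec

buf = TTransport.TMemoryBuffer()
PartitionSpec(dbName='default', tableName='web_logs').write(
    TBinaryProtocol.TBinaryProtocol(buf))

data = buf.getvalue()               # bytes the Ruby binding could also decode
decoded = PartitionSpec()
decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(data)))
assert decoded.dbName == 'default'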

Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Fri Sep  5 17:52:32 2014
@@ -485,6 +485,24 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions failed: unknown result')
     end
 
+    def add_partitions_pspec(new_parts)
+      send_add_partitions_pspec(new_parts)
+      return recv_add_partitions_pspec()
+    end
+
+    def send_add_partitions_pspec(new_parts)
+      send_message('add_partitions_pspec', Add_partitions_pspec_args, :new_parts => new_parts)
+    end
+
+    def recv_add_partitions_pspec()
+      result = receive_message(Add_partitions_pspec_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions_pspec failed: unknown result')
+    end
+
     def append_partition(db_name, tbl_name, part_vals)
       send_append_partition(db_name, tbl_name, part_vals)
       return recv_append_partition()
@@ -764,6 +782,23 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_with_auth failed: unknown result')
     end
 
+    def get_partitions_pspec(db_name, tbl_name, max_parts)
+      send_get_partitions_pspec(db_name, tbl_name, max_parts)
+      return recv_get_partitions_pspec()
+    end
+
+    def send_get_partitions_pspec(db_name, tbl_name, max_parts)
+      send_message('get_partitions_pspec', Get_partitions_pspec_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts)
+    end
+
+    def recv_get_partitions_pspec()
+      result = receive_message(Get_partitions_pspec_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_pspec failed: unknown result')
+    end
+
     def get_partition_names(db_name, tbl_name, max_parts)
       send_get_partition_names(db_name, tbl_name, max_parts)
       return recv_get_partition_names()
@@ -848,6 +883,23 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_filter failed: unknown result')
     end
 
+    def get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)
+      send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)
+      return recv_get_part_specs_by_filter()
+    end
+
+    def send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)
+      send_message('get_part_specs_by_filter', Get_part_specs_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts)
+    end
+
+    def recv_get_part_specs_by_filter()
+      result = receive_message(Get_part_specs_by_filter_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_part_specs_by_filter failed: unknown result')
+    end
+
     def get_partitions_by_expr(req)
       send_get_partitions_by_expr(req)
       return recv_get_partitions_by_expr()
@@ -2292,6 +2344,21 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'add_partitions', seqid)
     end
 
+    def process_add_partitions_pspec(seqid, iprot, oprot)
+      args = read_args(iprot, Add_partitions_pspec_args)
+      result = Add_partitions_pspec_result.new()
+      begin
+        result.success = @handler.add_partitions_pspec(args.new_parts)
+      rescue ::InvalidObjectException => o1
+        result.o1 = o1
+      rescue ::AlreadyExistsException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      end
+      write_result(result, oprot, 'add_partitions_pspec', seqid)
+    end
+
     def process_append_partition(seqid, iprot, oprot)
       args = read_args(iprot, Append_partition_args)
       result = Append_partition_result.new()
@@ -2514,6 +2581,19 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_partitions_with_auth', seqid)
     end
 
+    def process_get_partitions_pspec(seqid, iprot, oprot)
+      args = read_args(iprot, Get_partitions_pspec_args)
+      result = Get_partitions_pspec_result.new()
+      begin
+        result.success = @handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_partitions_pspec', seqid)
+    end
+
     def process_get_partition_names(seqid, iprot, oprot)
       args = read_args(iprot, Get_partition_names_args)
       result = Get_partition_names_result.new()
@@ -2577,6 +2657,19 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_partitions_by_filter', seqid)
     end
 
+    def process_get_part_specs_by_filter(seqid, iprot, oprot)
+      args = read_args(iprot, Get_part_specs_by_filter_args)
+      result = Get_part_specs_by_filter_result.new()
+      begin
+        result.success = @handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      rescue ::NoSuchObjectException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_part_specs_by_filter', seqid)
+    end
+
     def process_get_partitions_by_expr(seqid, iprot, oprot)
       args = read_args(iprot, Get_partitions_by_expr_args)
       result = Get_partitions_by_expr_result.new()
@@ -4422,6 +4515,44 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Add_partitions_pspec_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    NEW_PARTS = 1
+
+    FIELDS = {
+      NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionSpec}}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_partitions_pspec_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::I32, :name => 'success'},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::AlreadyExistsException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Append_partition_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1
@@ -5096,6 +5227,46 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Get_partitions_pspec_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    DB_NAME = 1
+    TBL_NAME = 2
+    MAX_PARTS = 3
+
+    FIELDS = {
+      DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+      TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+      MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_partitions_pspec_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionSpec}},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_partition_names_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1
@@ -5306,6 +5477,48 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Get_part_specs_by_filter_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    DB_NAME = 1
+    TBL_NAME = 2
+    FILTER = 3
+    MAX_PARTS = 4
+
+    FIELDS = {
+      DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+      TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+      FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'},
+      MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_part_specs_by_filter_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionSpec}},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_partitions_by_expr_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     REQ = 1

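For context: the generated bindings above add three calls to the metastore Thrift
surface: add_partitions_pspec, get_partitions_pspec, and get_part_specs_by_filter.
A minimal Java sketch of how a client might exercise them (database, table, and
filter values are illustrative placeholders; assumes a reachable metastore):

    HiveConf conf = new HiveConf();
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // Fetch up to 100 partitions, grouped as PartitionSpecs where possible.
      PartitionSpecProxy specs = client.listPartitionSpecs("default", "web_logs", 100);

      // Filter-based variant; the filter syntax matches get_partitions_by_filter.
      PartitionSpecProxy filtered =
          client.listPartitionSpecsByFilter("default", "web_logs", "ds > '2014-09-01'", 100);

      // Re-add a fetched spec (e.g. on a replica warehouse); returns the count added.
      int added = client.add_partitions_pspec(specs);
    } finally {
      client.close();
    }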
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Fri Sep  5 17:52:32 2014
@@ -48,6 +48,9 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
+import com.google.common.collect.Multimaps;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -112,6 +115,10 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
+import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
 import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
 import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
@@ -129,6 +136,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
 import org.apache.hadoop.hive.metastore.api.TableStatsResult;
@@ -171,6 +179,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.txn.TxnHandler;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -1852,6 +1861,52 @@ public class HiveMetaStore extends Thrif
       }
     }
 
+    private static class PartValEqWrapperLite {
+      List<String> values;
+      String location;
+
+      public PartValEqWrapperLite(Partition partition) {
+        this.values = partition.isSetValues() ? partition.getValues() : null;
+        this.location = partition.getSd().getLocation();
+      }
+
+      @Override
+      public int hashCode() {
+        return values == null ? 0 : values.hashCode();
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (this == obj) {
+          return true;
+        }
+        if (!(obj instanceof PartValEqWrapperLite)) {
+          return false;
+        }
+
+        List<String> lhsValues = this.values;
+        List<String> rhsValues = ((PartValEqWrapperLite)obj).values;
+
+        if (lhsValues == null || rhsValues == null) {
+          return lhsValues == rhsValues;
+        }
+
+        if (lhsValues.size() != rhsValues.size()) {
+          return false;
+        }
+
+        for (int i = 0; i < lhsValues.size(); ++i) {
+          String lhsValue = lhsValues.get(i);
+          String rhsValue = rhsValues.get(i);
+
+          if ((lhsValue == null && rhsValue != null)
+              || (lhsValue != null && !lhsValue.equals(rhsValue))) {
+            return false;
+          }
+        }
+
+        return true;
+      }
+    }
+
     private List<Partition> add_partitions_core(
         RawStore ms, String dbName, String tblName, List<Partition> parts, boolean ifNotExists)
             throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
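PartValEqWrapperLite, added above, keys a batch of partitions on the values list
alone (the location is carried only for cleanup), which is what lets the pspec
add-path below detect duplicates while remembering which directories it created.
A sketch of that pattern, with p1 and p2 as hypothetical Partition objects
carrying the same values:

    Map<PartValEqWrapperLite, Boolean> addedPartitions =
        new HashMap<PartValEqWrapperLite, Boolean>();
    addedPartitions.put(new PartValEqWrapperLite(p1), /* madeDir */ true);
    if (addedPartitions.put(new PartValEqWrapperLite(p2), false) != null) {
      // Same partition values appeared twice in one batch: report it rather
      // than silently dropping one of the two.
      throw new MetaException("Duplicate partitions in the list: " + p2);
    }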
@@ -1979,6 +2034,85 @@ public class HiveMetaStore extends Thrif
       return ret;
     }
 
+    @Override
+    public int add_partitions_pspec(final List<PartitionSpec> partSpecs)
+        throws TException {
+      logInfo("add_partitions_pspec");
+
+      if (partSpecs == null || partSpecs.isEmpty()) {
+        return 0;
+      }
+
+      String dbName = partSpecs.get(0).getDbName();
+      String tableName = partSpecs.get(0).getTableName();
+
+      return add_partitions_pspec_core(getMS(), dbName, tableName, partSpecs, false);
+    }
+
+    private int add_partitions_pspec_core(
+        RawStore ms, String dbName, String tblName, List<PartitionSpec> partSpecs, boolean ifNotExists)
+        throws TException {
+      boolean success = false;
+      // Ensures that the list doesn't have dups, and keeps track of directories we have created.
+      Map<PartValEqWrapperLite, Boolean> addedPartitions = new HashMap<PartValEqWrapperLite, Boolean>();
+      PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(partSpecs);
+      PartitionSpecProxy.PartitionIterator partitionIterator = partitionSpecProxy.getPartitionIterator();
+      Table tbl = null;
+      try {
+        ms.openTransaction();
+        tbl = ms.getTable(dbName, tblName);
+        if (tbl == null) {
+          throw new InvalidObjectException("Unable to add partitions because "
+              + "database or table " + dbName + "." + tblName + " does not exist");
+        }
+
+        firePreEvent(new PreAddPartitionEvent(tbl, partitionSpecProxy, this));
+
+        int nPartitions = 0;
+        while (partitionIterator.hasNext()) {
+
+          Partition part = partitionIterator.getCurrent();
+
+          if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
+            throw new MetaException("Partition does not belong to target table "
+                + dbName + "." + tblName + ": " + part);
+          }
+          boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
+          if (!shouldAdd) {
+            LOG.info("Not adding partition " + part + " as it already exists");
+            // Advance past the skipped partition; continuing without next() would loop forever.
+            partitionIterator.next();
+            continue;
+          }
+          boolean madeDir = createLocationForAddedPartition(tbl, part);
+          if (addedPartitions.put(new PartValEqWrapperLite(part), madeDir) != null) {
+            // Technically, for ifNotExists case, we could insert one and discard the other
+            // because the first one now "exists", but it seems better to report the problem
+            // upstream as such a command doesn't make sense.
+            throw new MetaException("Duplicate partitions in the list: " + part);
+          }
+          initializeAddedPartition(tbl, partitionIterator, madeDir);
+
+          ++nPartitions;
+          partitionIterator.next();
+        }
+
+        success = ms.addPartitions(dbName, tblName, partitionSpecProxy, ifNotExists)
+               && ms.commitTransaction();
+
+        return nPartitions;
+      } finally {
+        if (!success) {
+          ms.rollbackTransaction();
+          for (Entry<PartValEqWrapperLite, Boolean> e : addedPartitions.entrySet()) {
+            if (e.getValue()) {
+              wh.deleteDir(new Path(e.getKey().location), true);
+              // We just created this directory during this call, so it is safe to remove on rollback.
+            }
+          }
+        }
+        fireMetaStoreAddPartitionEvent(tbl, partitionSpecProxy, null, success);
+      }
+    }
+
     private boolean startAddPartition(
         RawStore ms, Partition part, boolean ifNotExists) throws MetaException, TException {
       MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
@@ -2039,9 +2173,14 @@ public class HiveMetaStore extends Thrif
 
     private void initializeAddedPartition(
         final Table tbl, final Partition part, boolean madeDir) throws MetaException {
+      initializeAddedPartition(tbl, new PartitionSpecProxy.SimplePartitionWrapperIterator(part), madeDir);
+    }
+
+    private void initializeAddedPartition(
+        final Table tbl, final PartitionSpecProxy.PartitionIterator part, boolean madeDir) throws MetaException {
       if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
           !MetaStoreUtils.isView(tbl)) {
-        MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir);
+        MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false);
       }
 
       // set create time
@@ -2122,6 +2261,20 @@ public class HiveMetaStore extends Thrif
       }
     }
 
+    private void fireMetaStoreAddPartitionEvent(final Table tbl,
+        final PartitionSpecProxy partitionSpec, final EnvironmentContext envContext, boolean success)
+          throws MetaException {
+      if (tbl != null && partitionSpec != null) {
+        AddPartitionEvent addPartitionEvent =
+            new AddPartitionEvent(tbl, partitionSpec, success, this);
+        addPartitionEvent.setEnvironmentContext(envContext);
+
+        for (MetaStoreEventListener listener : listeners) {
+          listener.onAddPartition(addPartitionEvent);
+        }
+      }
+    }
+
     @Override
     public Partition add_partition(final Partition part)
         throws InvalidObjectException, AlreadyExistsException, MetaException {
@@ -2582,6 +2735,161 @@ public class HiveMetaStore extends Thrif
     }
 
     @Override
+    public List<PartitionSpec> get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts)
+      throws NoSuchObjectException, MetaException  {
+
+      String dbName = db_name.toLowerCase();
+      String tableName = tbl_name.toLowerCase();
+
+      startTableFunction("get_partitions_pspec", dbName, tableName);
+
+      List<PartitionSpec> partitionSpecs = null;
+      try {
+        Table table = get_table(dbName, tableName);
+        List<Partition> partitions = get_partitions(dbName, tableName, (short) max_parts);
+
+        if (is_partition_spec_grouping_enabled(table)) {
+          partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions);
+        } else {
+          PartitionSpec pSpec = new PartitionSpec();
+          pSpec.setPartitionList(new PartitionListComposingSpec(partitions));
+          pSpec.setDbName(dbName);
+          pSpec.setTableName(tableName);
+          pSpec.setRootPath(table.getSd().getLocation());
+          partitionSpecs = Arrays.asList(pSpec);
+        }
+
+        return partitionSpecs;
+      } finally {
+        endFunction("get_partitions_pspec", partitionSpecs != null && !partitionSpecs.isEmpty(), null, tbl_name);
+      }
+    }
+
+    private static class StorageDescriptorKey {
+
+      private StorageDescriptor sd;
+
+      StorageDescriptorKey(StorageDescriptor sd) { this.sd = sd; }
+
+      StorageDescriptor getSd() {
+        return sd;
+      }
+
+      private String hashCodeKey() {
+        return sd.getInputFormat() + "\t"
+            + sd.getOutputFormat() +  "\t"
+            + sd.getSerdeInfo().getSerializationLib() + "\t"
+            + sd.getCols();
+      }
+
+      @Override
+      public int hashCode() {
+        return hashCodeKey().hashCode();
+      }
+
+      @Override
+      public boolean equals(Object rhs) {
+        if (rhs == this) {
+          return true;
+        }
+
+        if (!(rhs instanceof StorageDescriptorKey)) {
+          return false;
+        }
+
+        return (hashCodeKey().equals(((StorageDescriptorKey) rhs).hashCodeKey()));
+      }
+    }
+
+    private List<PartitionSpec> get_partitionspecs_grouped_by_storage_descriptor(Table table, List<Partition> partitions)
+      throws NoSuchObjectException, MetaException {
+
+      assert is_partition_spec_grouping_enabled(table);
+
+      final String tablePath = table.getSd().getLocation();
+
+      ImmutableListMultimap<Boolean, Partition> partitionsWithinTableDirectory
+          = Multimaps.index(partitions, new com.google.common.base.Function<Partition, Boolean>() {
+
+        @Override
+        public Boolean apply(Partition input) {
+          return input.getSd().getLocation().startsWith(tablePath);
+        }
+      });
+
+      List<PartitionSpec> partSpecs = new ArrayList<PartitionSpec>();
+
+      // Classify partitions within the table directory into groups,
+      // based on shared SD properties.
+
+      Map<StorageDescriptorKey, List<PartitionWithoutSD>> sdToPartList
+          = new HashMap<StorageDescriptorKey, List<PartitionWithoutSD>>();
+
+      if (partitionsWithinTableDirectory.containsKey(true)) {
+
+        ImmutableList<Partition> partsWithinTableDir = partitionsWithinTableDirectory.get(true);
+        for (Partition partition : partsWithinTableDir) {
+
+          PartitionWithoutSD partitionWithoutSD = new PartitionWithoutSD(
+              partition.getValues(),
+              partition.getCreateTime(),
+              partition.getLastAccessTime(),
+              partition.getSd().getLocation().substring(tablePath.length()),
+              partition.getParameters());
+
+          StorageDescriptorKey sdKey = new StorageDescriptorKey(partition.getSd());
+          if (!sdToPartList.containsKey(sdKey)) {
+            sdToPartList.put(sdKey, new ArrayList<PartitionWithoutSD>());
+          }
+
+          sdToPartList.get(sdKey).add(partitionWithoutSD);
+
+        } // for (partitionsWithinTableDirectory);
+
+        for (Map.Entry<StorageDescriptorKey, List<PartitionWithoutSD>> entry : sdToPartList.entrySet()) {
+          partSpecs.add(getSharedSDPartSpec(table, entry.getKey(), entry.getValue()));
+        }
+
+      } // Done grouping partitions within table-dir.
+
+      // Lump all partitions outside the tablePath into one PartSpec.
+      if (partitionsWithinTableDirectory.containsKey(false)) {
+        List<Partition> partitionsOutsideTableDir = partitionsWithinTableDirectory.get(false);
+        if (!partitionsOutsideTableDir.isEmpty()) {
+          PartitionSpec partListSpec = new PartitionSpec();
+          partListSpec.setDbName(table.getDbName());
+          partListSpec.setTableName(table.getTableName());
+          partListSpec.setPartitionList(new PartitionListComposingSpec(partitionsOutsideTableDir));
+          partSpecs.add(partListSpec);
+        }
+
+      }
+      return partSpecs;
+    }
+
+    private PartitionSpec getSharedSDPartSpec(Table table, StorageDescriptorKey sdKey, List<PartitionWithoutSD> partitions) {
+
+      StorageDescriptor sd = new StorageDescriptor(sdKey.getSd());
+      sd.setLocation(table.getSd().getLocation()); // Use table-dir as root-dir.
+      PartitionSpecWithSharedSD sharedSDPartSpec =
+          new PartitionSpecWithSharedSD(partitions, sd);
+
+      PartitionSpec ret = new PartitionSpec();
+      ret.setRootPath(sd.getLocation());
+      ret.setSharedSDPartitionSpec(sharedSDPartSpec);
+      ret.setDbName(table.getDbName());
+      ret.setTableName(table.getTableName());
+
+      return ret;
+    }
+
+    private static boolean is_partition_spec_grouping_enabled(Table table) {
+
+      Map<String, String> parameters = table.getParameters();
+      return parameters.containsKey("hive.hcatalog.partition.spec.grouping.enabled")
+          && parameters.get("hive.hcatalog.partition.spec.grouping.enabled").equalsIgnoreCase("true");
+    }
+
+    @Override
     public List<String> get_partition_names(final String db_name, final String tbl_name,
         final short max_parts) throws MetaException {
       startTableFunction("get_partition_names", db_name, tbl_name);
@@ -3790,6 +4098,37 @@ public class HiveMetaStore extends Thrif
     }
 
     @Override
+    public List<PartitionSpec> get_part_specs_by_filter(final String dbName,
+        final String tblName, final String filter, final int maxParts)
+        throws MetaException, NoSuchObjectException, TException {
+
+      startTableFunction("get_partitions_by_filter_pspec", dbName, tblName);
+
+      List<PartitionSpec> partitionSpecs = null;
+      try {
+        Table table = get_table(dbName, tblName);
+        List<Partition> partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts);
+
+        if (is_partition_spec_grouping_enabled(table)) {
+          partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions);
+        } else {
+          PartitionSpec pSpec = new PartitionSpec();
+          pSpec.setPartitionList(new PartitionListComposingSpec(partitions));
+          pSpec.setRootPath(table.getSd().getLocation());
+          pSpec.setDbName(dbName);
+          pSpec.setTableName(tblName);
+          partitionSpecs = Arrays.asList(pSpec);
+        }
+
+        return partitionSpecs;
+      } finally {
+        endFunction("get_partitions_by_filter_pspec", partitionSpecs != null && !partitionSpecs.isEmpty(), null, tblName);
+      }
+    }
+
+    @Override
     public PartitionsByExprResult get_partitions_by_expr(
         PartitionsByExprRequest req) throws TException {
       String dbName = req.getDbName(), tblName = req.getTblName();

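Note that the shared-SD grouping in get_partitions_pspec and
get_part_specs_by_filter is opt-in per table: is_partition_spec_grouping_enabled
checks the table parameter hive.hcatalog.partition.spec.grouping.enabled. One way
a client might turn it on (a sketch; assumes the table already exists and the
client from the earlier example):

    Table table = client.getTable("default", "web_logs");
    table.getParameters().put("hive.hcatalog.partition.spec.grouping.enabled", "true");
    client.alter_table("default", "web_logs", table);
    // With grouping enabled, partitions under the table directory are grouped
    // by shared StorageDescriptor; partitions outside it are returned in a
    // single list-composing PartitionSpec.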
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Fri Sep  5 17:52:32 2014
@@ -98,6 +98,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
 import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
@@ -121,6 +122,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.api.UnlockRequest;
+import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.txn.TxnHandler;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -509,6 +512,11 @@ public class HiveMetaStoreClient impleme
     return needResults ? result.getPartitions() : null;
   }
 
+  @Override
+  public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+    return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+  }
+
   /**
    * @param table_name
    * @param db_name
@@ -911,6 +919,11 @@ public class HiveMetaStoreClient impleme
   }
 
   @Override
+  public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+    return PartitionSpecProxy.Factory.get(client.get_partitions_pspec(dbName, tableName, maxParts));
+  }
+
+  @Override
   public List<Partition> listPartitions(String db_name, String tbl_name,
       List<String> part_vals, short max_parts)
       throws NoSuchObjectException, MetaException, TException {
@@ -958,6 +971,14 @@ public class HiveMetaStoreClient impleme
   }
 
   @Override
+  public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+      String filter, int max_parts)
+      throws MetaException, NoSuchObjectException, TException {
+    return PartitionSpecProxy.Factory.get(
+        client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts));
+  }
+
+  @Override
   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
       String default_partition_name, short max_parts, List<Partition> result)
           throws TException {

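The PartitionSpecProxy returned by these client methods is walked with the same
iterator the server code uses, so callers need not materialize every Partition
up front. A sketch, with specs as returned by listPartitionSpecs above:

    PartitionSpecProxy.PartitionIterator iterator = specs.getPartitionIterator();
    while (iterator.hasNext()) {
      Partition partition = iterator.next();
      System.out.println(partition.getValues());
    }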
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Fri Sep  5 17:52:32 2014
@@ -28,10 +28,12 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
 import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
 import java.util.List;
@@ -368,6 +370,9 @@ public interface IMetaStoreClient {
   int add_partitions(List<Partition> partitions)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
 
+  int add_partitions_pspec(PartitionSpecProxy partitionSpec)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+
   /**
    * Add partitions to the table.
    *
@@ -448,6 +453,8 @@ public interface IMetaStoreClient {
   List<Partition> listPartitions(String db_name, String tbl_name,
       short max_parts) throws NoSuchObjectException, MetaException, TException;
 
+  PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts)
+      throws TException;
+
   List<Partition> listPartitions(String db_name, String tbl_name,
       List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
 
@@ -476,6 +483,9 @@ public interface IMetaStoreClient {
       String filter, short max_parts) throws MetaException,
          NoSuchObjectException, TException;
 
+  PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+      String filter, int max_parts)
+      throws MetaException, NoSuchObjectException, TException;
 
   /**
    * Get list of partitions matching specified serialized expression

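The new interface methods keep the exception contract of their List<Partition>
counterparts, so callers migrating to PartitionSpecs can reuse their existing
handling. A sketch (specs as in the earlier example; TException still propagates):

    try {
      client.add_partitions_pspec(specs);
    } catch (AlreadyExistsException e) {
      // At least one partition in the spec already exists.
    } catch (InvalidObjectException e) {
      // The target database or table is missing, or a partition is malformed.
    }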
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Fri Sep  5 17:52:32 2014
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -288,6 +289,22 @@ public class MetaStoreUtils {
    */
   public static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
       boolean madeDir, boolean forceRecompute) throws MetaException {
+    return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
+                                    wh, madeDir, forceRecompute);
+  }
+
+  /**
+   * Updates the numFiles and totalSize parameters of the partition at the iterator's
+   * current position, querying the warehouse if the partition does not already have
+   * values for these parameters.
+   * @param part iterator positioned at the partition whose stats should be updated
+   * @param wh warehouse instance used to list the files at the partition location
+   * @param madeDir if true, the directory was just created and can be assumed to be empty
+   * @param forceRecompute recompute the stats even if the partition already has
+   *          these parameters set
+   * @return true if the stats were updated, false otherwise
+   */
+  public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh,
+      boolean madeDir, boolean forceRecompute) throws MetaException {
     Map<String,String> params = part.getParameters();
     boolean updated = false;
     if (forceRecompute ||
@@ -297,10 +314,10 @@ public class MetaStoreUtils {
         params = new HashMap<String,String>();
       }
       if (!madeDir) {
-        // The partitition location already existed and may contain data. Lets try to
+        // The partition location already existed and may contain data. Let's try to
         // populate those statistics that don't require a full scan of the data.
         LOG.warn("Updating partition stats fast for: " + part.getTableName());
-        FileStatus[] fileStatus = wh.getFileStatusesForSD(part.getSd());
+        FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
         populateQuickStats(fileStatus, params);
         LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
         if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {

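The single-Partition overload of updatePartitionStatsFast now funnels into the
iterator-based version through SimplePartitionWrapperIterator, so both call sites
share one implementation. The delegation is equivalent to calling (names exactly
as in this diff):

    boolean updated = MetaStoreUtils.updatePartitionStatsFast(
        new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
        wh, madeDir, /* forceRecompute */ false);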
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1622748&r1=1622747&r2=1622748&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Fri Sep  5 17:52:32 2014
@@ -39,6 +39,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
 import javax.jdo.JDODataStoreException;
 import javax.jdo.JDOHelper;
@@ -63,6 +64,7 @@ import org.apache.hadoop.hive.common.cla
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -133,6 +135,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
 import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
@@ -183,6 +186,8 @@ public class ObjectStore implements RawS
   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
   private final AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
 
+  private Pattern partitionValidationPattern;
+
   public ObjectStore() {
   }
 
@@ -227,6 +232,14 @@ public class ObjectStore implements RawS
 
       initialize(propsFromConf);
 
+      String partitionValidationRegex =
+          hiveConf.getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN);
+      if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+        partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+      } else {
+        partitionValidationPattern = null;
+      }
+
       if (!isInitialized) {
         throw new RuntimeException(
         "Unable to create persistence manager. Check dss.log for details");
@@ -1295,6 +1308,76 @@ public class ObjectStore implements RawS
     return success;
   }
 
+  private boolean isValidPartition(
+      Partition part, boolean ifNotExists) throws MetaException {
+    MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
+        partitionValidationPattern);
+    boolean doesExist = doesPartitionExist(
+        part.getDbName(), part.getTableName(), part.getValues());
+    if (doesExist && !ifNotExists) {
+      throw new MetaException("Partition already exists: " + part);
+    }
+    return !doesExist;
+  }
+
+  @Override
+  public boolean addPartitions(String dbName, String tblName,
+                               PartitionSpecProxy partitionSpec, boolean ifNotExists)
+      throws InvalidObjectException, MetaException {
+    boolean success = false;
+    openTransaction();
+    try {
+      List<MTablePrivilege> tabGrants = null;
+      List<MTableColumnPrivilege> tabColumnGrants = null;
+      MTable table = this.getMTable(dbName, tblName);
+      if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
+        tabGrants = this.listAllTableGrants(dbName, tblName);
+        tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
+      }
+
+      if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) {
+        throw new MetaException("Partition does not belong to target table "
+            + dbName + "." + tblName + ": " + partitionSpec);
+      }
+
+      PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
+
+      int now = (int)(System.currentTimeMillis()/1000);
+
+      while (iterator.hasNext()) {
+        Partition part = iterator.next();
+
+        if (isValidPartition(part, ifNotExists)) {
+          MPartition mpart = convertToMPart(part, true);
+          pm.makePersistent(mpart);
+          if (tabGrants != null) {
+            for (MTablePrivilege tab : tabGrants) {
+              pm.makePersistent(new MPartitionPrivilege(tab.getPrincipalName(),
+                  tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
+                  tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
+            }
+          }
+
+          if (tabColumnGrants != null) {
+            for (MTableColumnPrivilege col : tabColumnGrants) {
+              pm.makePersistent(new MPartitionColumnPrivilege(col.getPrincipalName(),
+                  col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
+                  now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
+            }
+          }
+        }
+      }
+
+      success = commitTransaction();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
   @Override
   public boolean addPartition(Partition part) throws InvalidObjectException,
       MetaException {