Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/02 21:57:07 UTC

svn commit: r1622108 [11/27] - in /hive/branches/tez: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/ bin/ bin/ext/ checkstyle/ common/src/java/...

Modified: hive/branches/tez/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py (original)
+++ hive/branches/tez/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py Tue Sep  2 19:56:56 2014
@@ -4233,6 +4233,77 @@ class AggrStats:
   def __ne__(self, other):
     return not (self == other)
 
+class SetPartitionsStatsRequest:
+  """
+  Attributes:
+   - colStats
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1
+  )
+
+  def __init__(self, colStats=None,):
+    self.colStats = colStats
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.colStats = []
+          (_etype226, _size223) = iprot.readListBegin()
+          for _i227 in xrange(_size223):
+            _elem228 = ColumnStatistics()
+            _elem228.read(iprot)
+            self.colStats.append(_elem228)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('SetPartitionsStatsRequest')
+    if self.colStats is not None:
+      oprot.writeFieldBegin('colStats', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.colStats))
+      for iter229 in self.colStats:
+        iter229.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.colStats is None:
+      raise TProtocol.TProtocolException(message='Required field colStats is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class Schema:
   """
   Attributes:
@@ -4262,22 +4333,22 @@ class Schema:
       if fid == 1:
         if ftype == TType.LIST:
           self.fieldSchemas = []
-          (_etype226, _size223) = iprot.readListBegin()
-          for _i227 in xrange(_size223):
-            _elem228 = FieldSchema()
-            _elem228.read(iprot)
-            self.fieldSchemas.append(_elem228)
+          (_etype233, _size230) = iprot.readListBegin()
+          for _i234 in xrange(_size230):
+            _elem235 = FieldSchema()
+            _elem235.read(iprot)
+            self.fieldSchemas.append(_elem235)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype230, _vtype231, _size229 ) = iprot.readMapBegin() 
-          for _i233 in xrange(_size229):
-            _key234 = iprot.readString();
-            _val235 = iprot.readString();
-            self.properties[_key234] = _val235
+          (_ktype237, _vtype238, _size236 ) = iprot.readMapBegin() 
+          for _i240 in xrange(_size236):
+            _key241 = iprot.readString();
+            _val242 = iprot.readString();
+            self.properties[_key241] = _val242
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4294,16 +4365,16 @@ class Schema:
     if self.fieldSchemas is not None:
       oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas))
-      for iter236 in self.fieldSchemas:
-        iter236.write(oprot)
+      for iter243 in self.fieldSchemas:
+        iter243.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter237,viter238 in self.properties.items():
-        oprot.writeString(kiter237)
-        oprot.writeString(viter238)
+      for kiter244,viter245 in self.properties.items():
+        oprot.writeString(kiter244)
+        oprot.writeString(viter245)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4350,11 +4421,11 @@ class EnvironmentContext:
       if fid == 1:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype240, _vtype241, _size239 ) = iprot.readMapBegin() 
-          for _i243 in xrange(_size239):
-            _key244 = iprot.readString();
-            _val245 = iprot.readString();
-            self.properties[_key244] = _val245
+          (_ktype247, _vtype248, _size246 ) = iprot.readMapBegin() 
+          for _i250 in xrange(_size246):
+            _key251 = iprot.readString();
+            _val252 = iprot.readString();
+            self.properties[_key251] = _val252
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4371,9 +4442,9 @@ class EnvironmentContext:
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter246,viter247 in self.properties.items():
-        oprot.writeString(kiter246)
-        oprot.writeString(viter247)
+      for kiter253,viter254 in self.properties.items():
+        oprot.writeString(kiter253)
+        oprot.writeString(viter254)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4423,11 +4494,11 @@ class PartitionsByExprResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype251, _size248) = iprot.readListBegin()
-          for _i252 in xrange(_size248):
-            _elem253 = Partition()
-            _elem253.read(iprot)
-            self.partitions.append(_elem253)
+          (_etype258, _size255) = iprot.readListBegin()
+          for _i259 in xrange(_size255):
+            _elem260 = Partition()
+            _elem260.read(iprot)
+            self.partitions.append(_elem260)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4449,8 +4520,8 @@ class PartitionsByExprResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter254 in self.partitions:
-        iter254.write(oprot)
+      for iter261 in self.partitions:
+        iter261.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.hasUnknownPartitions is not None:
@@ -4619,11 +4690,11 @@ class TableStatsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.tableStats = []
-          (_etype258, _size255) = iprot.readListBegin()
-          for _i259 in xrange(_size255):
-            _elem260 = ColumnStatisticsObj()
-            _elem260.read(iprot)
-            self.tableStats.append(_elem260)
+          (_etype265, _size262) = iprot.readListBegin()
+          for _i266 in xrange(_size262):
+            _elem267 = ColumnStatisticsObj()
+            _elem267.read(iprot)
+            self.tableStats.append(_elem267)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4640,8 +4711,8 @@ class TableStatsResult:
     if self.tableStats is not None:
       oprot.writeFieldBegin('tableStats', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.tableStats))
-      for iter261 in self.tableStats:
-        iter261.write(oprot)
+      for iter268 in self.tableStats:
+        iter268.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4690,17 +4761,17 @@ class PartitionsStatsResult:
       if fid == 1:
         if ftype == TType.MAP:
           self.partStats = {}
-          (_ktype263, _vtype264, _size262 ) = iprot.readMapBegin() 
-          for _i266 in xrange(_size262):
-            _key267 = iprot.readString();
-            _val268 = []
-            (_etype272, _size269) = iprot.readListBegin()
-            for _i273 in xrange(_size269):
-              _elem274 = ColumnStatisticsObj()
-              _elem274.read(iprot)
-              _val268.append(_elem274)
+          (_ktype270, _vtype271, _size269 ) = iprot.readMapBegin() 
+          for _i273 in xrange(_size269):
+            _key274 = iprot.readString();
+            _val275 = []
+            (_etype279, _size276) = iprot.readListBegin()
+            for _i280 in xrange(_size276):
+              _elem281 = ColumnStatisticsObj()
+              _elem281.read(iprot)
+              _val275.append(_elem281)
             iprot.readListEnd()
-            self.partStats[_key267] = _val268
+            self.partStats[_key274] = _val275
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4717,11 +4788,11 @@ class PartitionsStatsResult:
     if self.partStats is not None:
       oprot.writeFieldBegin('partStats', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats))
-      for kiter275,viter276 in self.partStats.items():
-        oprot.writeString(kiter275)
-        oprot.writeListBegin(TType.STRUCT, len(viter276))
-        for iter277 in viter276:
-          iter277.write(oprot)
+      for kiter282,viter283 in self.partStats.items():
+        oprot.writeString(kiter282)
+        oprot.writeListBegin(TType.STRUCT, len(viter283))
+        for iter284 in viter283:
+          iter284.write(oprot)
         oprot.writeListEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
@@ -4787,10 +4858,10 @@ class TableStatsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.colNames = []
-          (_etype281, _size278) = iprot.readListBegin()
-          for _i282 in xrange(_size278):
-            _elem283 = iprot.readString();
-            self.colNames.append(_elem283)
+          (_etype288, _size285) = iprot.readListBegin()
+          for _i289 in xrange(_size285):
+            _elem290 = iprot.readString();
+            self.colNames.append(_elem290)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4815,8 +4886,8 @@ class TableStatsRequest:
     if self.colNames is not None:
       oprot.writeFieldBegin('colNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.colNames))
-      for iter284 in self.colNames:
-        oprot.writeString(iter284)
+      for iter291 in self.colNames:
+        oprot.writeString(iter291)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4888,20 +4959,20 @@ class PartitionsStatsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.colNames = []
-          (_etype288, _size285) = iprot.readListBegin()
-          for _i289 in xrange(_size285):
-            _elem290 = iprot.readString();
-            self.colNames.append(_elem290)
+          (_etype295, _size292) = iprot.readListBegin()
+          for _i296 in xrange(_size292):
+            _elem297 = iprot.readString();
+            self.colNames.append(_elem297)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype294, _size291) = iprot.readListBegin()
-          for _i295 in xrange(_size291):
-            _elem296 = iprot.readString();
-            self.partNames.append(_elem296)
+          (_etype301, _size298) = iprot.readListBegin()
+          for _i302 in xrange(_size298):
+            _elem303 = iprot.readString();
+            self.partNames.append(_elem303)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4926,15 +4997,15 @@ class PartitionsStatsRequest:
     if self.colNames is not None:
       oprot.writeFieldBegin('colNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.colNames))
-      for iter297 in self.colNames:
-        oprot.writeString(iter297)
+      for iter304 in self.colNames:
+        oprot.writeString(iter304)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 4)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter298 in self.partNames:
-        oprot.writeString(iter298)
+      for iter305 in self.partNames:
+        oprot.writeString(iter305)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4989,11 +5060,11 @@ class AddPartitionsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype302, _size299) = iprot.readListBegin()
-          for _i303 in xrange(_size299):
-            _elem304 = Partition()
-            _elem304.read(iprot)
-            self.partitions.append(_elem304)
+          (_etype309, _size306) = iprot.readListBegin()
+          for _i310 in xrange(_size306):
+            _elem311 = Partition()
+            _elem311.read(iprot)
+            self.partitions.append(_elem311)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5010,8 +5081,8 @@ class AddPartitionsResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter305 in self.partitions:
-        iter305.write(oprot)
+      for iter312 in self.partitions:
+        iter312.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5080,11 +5151,11 @@ class AddPartitionsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.parts = []
-          (_etype309, _size306) = iprot.readListBegin()
-          for _i310 in xrange(_size306):
-            _elem311 = Partition()
-            _elem311.read(iprot)
-            self.parts.append(_elem311)
+          (_etype316, _size313) = iprot.readListBegin()
+          for _i317 in xrange(_size313):
+            _elem318 = Partition()
+            _elem318.read(iprot)
+            self.parts.append(_elem318)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5119,8 +5190,8 @@ class AddPartitionsRequest:
     if self.parts is not None:
       oprot.writeFieldBegin('parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.parts))
-      for iter312 in self.parts:
-        iter312.write(oprot)
+      for iter319 in self.parts:
+        iter319.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.ifNotExists is not None:
@@ -5183,11 +5254,11 @@ class DropPartitionsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype316, _size313) = iprot.readListBegin()
-          for _i317 in xrange(_size313):
-            _elem318 = Partition()
-            _elem318.read(iprot)
-            self.partitions.append(_elem318)
+          (_etype323, _size320) = iprot.readListBegin()
+          for _i324 in xrange(_size320):
+            _elem325 = Partition()
+            _elem325.read(iprot)
+            self.partitions.append(_elem325)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5204,8 +5275,8 @@ class DropPartitionsResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter319 in self.partitions:
-        iter319.write(oprot)
+      for iter326 in self.partitions:
+        iter326.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5329,21 +5400,21 @@ class RequestPartsSpec:
       if fid == 1:
         if ftype == TType.LIST:
           self.names = []
-          (_etype323, _size320) = iprot.readListBegin()
-          for _i324 in xrange(_size320):
-            _elem325 = iprot.readString();
-            self.names.append(_elem325)
+          (_etype330, _size327) = iprot.readListBegin()
+          for _i331 in xrange(_size327):
+            _elem332 = iprot.readString();
+            self.names.append(_elem332)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.LIST:
           self.exprs = []
-          (_etype329, _size326) = iprot.readListBegin()
-          for _i330 in xrange(_size326):
-            _elem331 = DropPartitionsExpr()
-            _elem331.read(iprot)
-            self.exprs.append(_elem331)
+          (_etype336, _size333) = iprot.readListBegin()
+          for _i337 in xrange(_size333):
+            _elem338 = DropPartitionsExpr()
+            _elem338.read(iprot)
+            self.exprs.append(_elem338)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5360,15 +5431,15 @@ class RequestPartsSpec:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter332 in self.names:
-        oprot.writeString(iter332)
+      for iter339 in self.names:
+        oprot.writeString(iter339)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.exprs is not None:
       oprot.writeFieldBegin('exprs', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.exprs))
-      for iter333 in self.exprs:
-        iter333.write(oprot)
+      for iter340 in self.exprs:
+        iter340.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5695,11 +5766,11 @@ class Function:
       elif fid == 8:
         if ftype == TType.LIST:
           self.resourceUris = []
-          (_etype337, _size334) = iprot.readListBegin()
-          for _i338 in xrange(_size334):
-            _elem339 = ResourceUri()
-            _elem339.read(iprot)
-            self.resourceUris.append(_elem339)
+          (_etype344, _size341) = iprot.readListBegin()
+          for _i345 in xrange(_size341):
+            _elem346 = ResourceUri()
+            _elem346.read(iprot)
+            self.resourceUris.append(_elem346)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5744,8 +5815,8 @@ class Function:
     if self.resourceUris is not None:
       oprot.writeFieldBegin('resourceUris', TType.LIST, 8)
       oprot.writeListBegin(TType.STRUCT, len(self.resourceUris))
-      for iter340 in self.resourceUris:
-        iter340.write(oprot)
+      for iter347 in self.resourceUris:
+        iter347.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5904,11 +5975,11 @@ class GetOpenTxnsInfoResponse:
       elif fid == 2:
         if ftype == TType.LIST:
           self.open_txns = []
-          (_etype344, _size341) = iprot.readListBegin()
-          for _i345 in xrange(_size341):
-            _elem346 = TxnInfo()
-            _elem346.read(iprot)
-            self.open_txns.append(_elem346)
+          (_etype351, _size348) = iprot.readListBegin()
+          for _i352 in xrange(_size348):
+            _elem353 = TxnInfo()
+            _elem353.read(iprot)
+            self.open_txns.append(_elem353)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5929,8 +6000,8 @@ class GetOpenTxnsInfoResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.open_txns))
-      for iter347 in self.open_txns:
-        iter347.write(oprot)
+      for iter354 in self.open_txns:
+        iter354.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5989,10 +6060,10 @@ class GetOpenTxnsResponse:
       elif fid == 2:
         if ftype == TType.SET:
           self.open_txns = set()
-          (_etype351, _size348) = iprot.readSetBegin()
-          for _i352 in xrange(_size348):
-            _elem353 = iprot.readI64();
-            self.open_txns.add(_elem353)
+          (_etype358, _size355) = iprot.readSetBegin()
+          for _i359 in xrange(_size355):
+            _elem360 = iprot.readI64();
+            self.open_txns.add(_elem360)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -6013,8 +6084,8 @@ class GetOpenTxnsResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.open_txns))
-      for iter354 in self.open_txns:
-        oprot.writeI64(iter354)
+      for iter361 in self.open_txns:
+        oprot.writeI64(iter361)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6155,10 +6226,10 @@ class OpenTxnsResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.txn_ids = []
-          (_etype358, _size355) = iprot.readListBegin()
-          for _i359 in xrange(_size355):
-            _elem360 = iprot.readI64();
-            self.txn_ids.append(_elem360)
+          (_etype365, _size362) = iprot.readListBegin()
+          for _i366 in xrange(_size362):
+            _elem367 = iprot.readI64();
+            self.txn_ids.append(_elem367)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6175,8 +6246,8 @@ class OpenTxnsResponse:
     if self.txn_ids is not None:
       oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.txn_ids))
-      for iter361 in self.txn_ids:
-        oprot.writeI64(iter361)
+      for iter368 in self.txn_ids:
+        oprot.writeI64(iter368)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6472,11 +6543,11 @@ class LockRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.component = []
-          (_etype365, _size362) = iprot.readListBegin()
-          for _i366 in xrange(_size362):
-            _elem367 = LockComponent()
-            _elem367.read(iprot)
-            self.component.append(_elem367)
+          (_etype372, _size369) = iprot.readListBegin()
+          for _i373 in xrange(_size369):
+            _elem374 = LockComponent()
+            _elem374.read(iprot)
+            self.component.append(_elem374)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6508,8 +6579,8 @@ class LockRequest:
     if self.component is not None:
       oprot.writeFieldBegin('component', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.component))
-      for iter368 in self.component:
-        iter368.write(oprot)
+      for iter375 in self.component:
+        iter375.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.txnid is not None:
@@ -7010,11 +7081,11 @@ class ShowLocksResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.locks = []
-          (_etype372, _size369) = iprot.readListBegin()
-          for _i373 in xrange(_size369):
-            _elem374 = ShowLocksResponseElement()
-            _elem374.read(iprot)
-            self.locks.append(_elem374)
+          (_etype379, _size376) = iprot.readListBegin()
+          for _i380 in xrange(_size376):
+            _elem381 = ShowLocksResponseElement()
+            _elem381.read(iprot)
+            self.locks.append(_elem381)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7031,8 +7102,8 @@ class ShowLocksResponse:
     if self.locks is not None:
       oprot.writeFieldBegin('locks', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.locks))
-      for iter375 in self.locks:
-        iter375.write(oprot)
+      for iter382 in self.locks:
+        iter382.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7230,20 +7301,20 @@ class HeartbeatTxnRangeResponse:
       if fid == 1:
         if ftype == TType.SET:
           self.aborted = set()
-          (_etype379, _size376) = iprot.readSetBegin()
-          for _i380 in xrange(_size376):
-            _elem381 = iprot.readI64();
-            self.aborted.add(_elem381)
+          (_etype386, _size383) = iprot.readSetBegin()
+          for _i387 in xrange(_size383):
+            _elem388 = iprot.readI64();
+            self.aborted.add(_elem388)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.SET:
           self.nosuch = set()
-          (_etype385, _size382) = iprot.readSetBegin()
-          for _i386 in xrange(_size382):
-            _elem387 = iprot.readI64();
-            self.nosuch.add(_elem387)
+          (_etype392, _size389) = iprot.readSetBegin()
+          for _i393 in xrange(_size389):
+            _elem394 = iprot.readI64();
+            self.nosuch.add(_elem394)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -7260,15 +7331,15 @@ class HeartbeatTxnRangeResponse:
     if self.aborted is not None:
       oprot.writeFieldBegin('aborted', TType.SET, 1)
       oprot.writeSetBegin(TType.I64, len(self.aborted))
-      for iter388 in self.aborted:
-        oprot.writeI64(iter388)
+      for iter395 in self.aborted:
+        oprot.writeI64(iter395)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     if self.nosuch is not None:
       oprot.writeFieldBegin('nosuch', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.nosuch))
-      for iter389 in self.nosuch:
-        oprot.writeI64(iter389)
+      for iter396 in self.nosuch:
+        oprot.writeI64(iter396)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7635,11 +7706,11 @@ class ShowCompactResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.compacts = []
-          (_etype393, _size390) = iprot.readListBegin()
-          for _i394 in xrange(_size390):
-            _elem395 = ShowCompactResponseElement()
-            _elem395.read(iprot)
-            self.compacts.append(_elem395)
+          (_etype400, _size397) = iprot.readListBegin()
+          for _i401 in xrange(_size397):
+            _elem402 = ShowCompactResponseElement()
+            _elem402.read(iprot)
+            self.compacts.append(_elem402)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7656,8 +7727,8 @@ class ShowCompactResponse:
     if self.compacts is not None:
       oprot.writeFieldBegin('compacts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-      for iter396 in self.compacts:
-        iter396.write(oprot)
+      for iter403 in self.compacts:
+        iter403.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()

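For reference, the new struct above carries a single required list of ColumnStatistics. A minimal hand-written fragment (not part of this commit; database, table, and partition names are hypothetical) showing how the equivalent request could be built with the generated Java API:

    import java.util.ArrayList;
    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

    // Describe which partition the statistics belong to (names hypothetical).
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, "default", "sales");
    desc.setPartName("ds=2014-09-02");

    // One ColumnStatistics per partition; fill statsObj with one
    // ColumnStatisticsObj per column being updated.
    ColumnStatistics colStats =
        new ColumnStatistics(desc, new ArrayList<ColumnStatisticsObj>());

    // colStats is the single required field of the new request struct.
    SetPartitionsStatsRequest request =
        new SetPartitionsStatsRequest(Arrays.asList(colStats));
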
Modified: hive/branches/tez/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (original)
+++ hive/branches/tez/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb Tue Sep  2 19:56:56 2014
@@ -1028,6 +1028,23 @@ class AggrStats
   ::Thrift::Struct.generate_accessors self
 end
 
+class SetPartitionsStatsRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  COLSTATS = 1
+
+  FIELDS = {
+    COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class Schema
   include ::Thrift::Struct, ::Thrift::Struct_Union
   FIELDSCHEMAS = 1

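As in the Python binding, the Ruby validate hook above rejects a request whose required colStats field is unset before it is written to the wire; the generated Java struct performs the same check. A tiny hedged fragment (the exact exception message varies by Thrift generator version):

    // Hand-written illustration, mirroring the generated validate().
    SetPartitionsStatsRequest empty = new SetPartitionsStatsRequest();
    try {
      empty.validate();  // throws: required field colStats is unset
    } catch (org.apache.thrift.TException e) {
      System.err.println(e.getMessage());
    }
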
Modified: hive/branches/tez/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/branches/tez/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Tue Sep  2 19:56:56 2014
@@ -1279,6 +1279,25 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_aggr_stats_for failed: unknown result')
     end
 
+    def set_aggr_stats_for(request)
+      send_set_aggr_stats_for(request)
+      return recv_set_aggr_stats_for()
+    end
+
+    def send_set_aggr_stats_for(request)
+      send_message('set_aggr_stats_for', Set_aggr_stats_for_args, :request => request)
+    end
+
+    def recv_set_aggr_stats_for()
+      result = receive_message(Set_aggr_stats_for_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      raise result.o4 unless result.o4.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'set_aggr_stats_for failed: unknown result')
+    end
+
     def delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
       send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
       return recv_delete_partition_column_statistics()
@@ -2907,6 +2926,23 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_aggr_stats_for', seqid)
     end
 
+    def process_set_aggr_stats_for(seqid, iprot, oprot)
+      args = read_args(iprot, Set_aggr_stats_for_args)
+      result = Set_aggr_stats_for_result.new()
+      begin
+        result.success = @handler.set_aggr_stats_for(args.request)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::InvalidObjectException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      rescue ::InvalidInputException => o4
+        result.o4 = o4
+      end
+      write_result(result, oprot, 'set_aggr_stats_for', seqid)
+    end
+
     def process_delete_partition_column_statistics(seqid, iprot, oprot)
       args = read_args(iprot, Delete_partition_column_statistics_args)
       result = Delete_partition_column_statistics_result.new()
@@ -6264,6 +6300,46 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Set_aggr_stats_for_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQUEST = 1
+
+    FIELDS = {
+      REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::SetPartitionsStatsRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Set_aggr_stats_for_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+    O4 = 4
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException},
+      O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Delete_partition_column_statistics_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1

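The Ruby client/processor pair above shows the wire shape of the new call: one request struct in, a boolean out, with four declared exceptions (o1..o4). Assuming the regenerated Java bindings from another part of this 27-part commit, the same RPC could be invoked directly as follows (host and port hypothetical, exception handling elided):

    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

    TSocket transport = new TSocket("localhost", 9083);  // host/port hypothetical
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    boolean ok = client.set_aggr_stats_for(request);  // request built as above
    transport.close();
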
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Tue Sep  2 19:56:56 2014
@@ -42,6 +42,7 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 import java.util.Timer;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -122,6 +123,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
@@ -249,7 +251,7 @@ public class HiveMetaStore extends Thrif
     private static String currentUrl;
 
     private Warehouse wh; // hdfs warehouse
-    private final ThreadLocal<RawStore> threadLocalMS =
+    private static final ThreadLocal<RawStore> threadLocalMS =
         new ThreadLocal<RawStore>() {
           @Override
           protected synchronized RawStore initialValue() {
@@ -264,6 +266,14 @@ public class HiveMetaStore extends Thrif
       }
     };
 
+    public static RawStore getRawStore() {
+      return threadLocalMS.get();
+    }
+
+    public static void removeRawStore() {
+      threadLocalMS.remove();
+    }
+
     // Thread local configuration is needed as many threads could make changes
     // to the conf using the connection hook
     private final ThreadLocal<Configuration> threadLocalConf =
@@ -383,6 +393,7 @@ public class HiveMetaStore extends Thrif
       }
     }
 
+    @Override
     public void init() throws MetaException {
       rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
       initListeners = MetaStoreUtils.getMetaStoreListeners(
@@ -435,7 +446,7 @@ public class HiveMetaStore extends Thrif
         partitionValidationPattern = null;
       }
 
-      long cleanFreq = hiveConf.getLongVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ) * 1000L;
+      long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
       if (cleanFreq > 0) {
         // In default config, there is no timer.
         Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
@@ -3709,6 +3720,19 @@ public class HiveMetaStore extends Thrif
       } finally {
         endFunction("write_partition_column_statistics: ", ret != false, null, tableName);
       }
+    } 
+    public boolean update_partition_column_statistics(
+        SetPartitionsStatsRequest request) throws NoSuchObjectException,
+        InvalidObjectException, MetaException, TException,
+        InvalidInputException {
+      boolean ret = false;
+      try {
+        ret = getMS().updatePartitionColumnStatistics(request);
+        return ret;
+      } finally {
+        endFunction("write_partition_column_statistics: ", ret != false, null,
+            null);
+      }
     }
 
     @Override
@@ -5023,12 +5047,8 @@ public class HiveMetaStore extends Thrif
       startFunction("get_aggr_stats_for: db=" + request.getDbName() + " table=" + request.getTblName());
       AggrStats aggrStats = null;
       try {
-        //TODO: We are setting partitionCnt for which we were able to retrieve stats same as
-        // incoming number from request. This is not correct, but currently no users of this api
-        // rely on this. Only, current user StatsAnnotation don't care for it. StatsOptimizer
-        // will care for it, so before StatsOptimizer begin using it, we need to fix this.
         aggrStats = new AggrStats(getMS().get_aggr_stats_for(request.getDbName(),
-          request.getTblName(), request.getPartNames(), request.getColNames()), request.getPartNames().size());
+          request.getTblName(), request.getPartNames(), request.getColNames()));
         return aggrStats;
       } finally {
           endFunction("get_partitions_statistics_req: ", aggrStats == null, null, request.getTblName());
@@ -5036,8 +5056,15 @@ public class HiveMetaStore extends Thrif
 
     }
 
+    @Override
+    public boolean set_aggr_stats_for(SetPartitionsStatsRequest request)
+        throws NoSuchObjectException, InvalidObjectException, MetaException,
+        InvalidInputException, TException {
+      return update_partition_column_statistics(request);
+    }
   }
 
+  
   public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
     return newHMSHandler(name, hiveConf, false);
   }

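Besides the new RPC, this file (and HiveMetaStoreClient.java below) switches interval configs from getLongVar/getIntVar with hand-scaling to getTimeVar with an explicit TimeUnit. A short fragment, not from this commit, assuming HiveConf's unit-suffix syntax for time values:

    // Before, the raw value was assumed to be in seconds and scaled by hand:
    //   long cleanFreq = hiveConf.getLongVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ) * 1000L;
    //
    // After, the unit is explicit at the call site, and (assuming HiveConf's
    // time-validator syntax) the property value may carry its own suffix,
    // e.g. hive.metastore.event.clean.freq=5m:
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    long cleanFreqMs = hiveConf.getTimeVar(
        ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
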
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Tue Sep  2 19:56:56 2014
@@ -39,6 +39,7 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.TimeUnit;
 
 import javax.security.auth.login.LoginException;
 
@@ -105,6 +106,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
@@ -150,7 +152,7 @@ public class HiveMetaStoreClient impleme
 
   // for thrift connects
   private int retries = 5;
-  private int retryDelaySeconds = 0;
+  private long retryDelaySeconds = 0;
 
   static final protected Log LOG = LogFactory.getLog("hive.metastore");
 
@@ -181,7 +183,8 @@ public class HiveMetaStoreClient impleme
 
     // get the number retries
     retries = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES);
-    retryDelaySeconds = conf.getIntVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY);
+    retryDelaySeconds = conf.getTimeVar(
+        ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
 
     // user wants file store based configuration
     if (conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) {
@@ -316,13 +319,14 @@ public class HiveMetaStoreClient impleme
     HadoopShims shim = ShimLoader.getHadoopShims();
     boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL);
     boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
-    int clientSocketTimeout = conf.getIntVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT);
+    int clientSocketTimeout = (int) conf.getTimeVar(
+        ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
 
     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
       for (URI store : metastoreUris) {
         LOG.info("Trying to connect to metastore with URI " + store);
         try {
-          transport = new TSocket(store.getHost(), store.getPort(), 1000 * clientSocketTimeout);
+          transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
           if (useSasl) {
             // Wrap thrift connection with SASL for secure connection.
             try {
@@ -1264,6 +1268,13 @@ public class HiveMetaStoreClient impleme
     InvalidInputException{
     return client.update_partition_column_statistics(statsObj);
   }
+  
+  /** {@inheritDoc} */
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+    throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+    InvalidInputException{
+    return client.set_aggr_stats_for(request);
+  }
 
   /** {@inheritDoc} */
   @Override

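With the client-side wrapper above, publishing a batch of partition column statistics becomes a single call instead of one update_partition_column_statistics round trip per object. A hedged usage sketch (conf is assumed to point at a running metastore; error handling elided):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

    HiveConf conf = new HiveConf();
    IMetaStoreClient msc = new HiveMetaStoreClient(conf);
    try {
      SetPartitionsStatsRequest request = ...;  // built as in the sketch above
      boolean ok = msc.setPartitionColumnStatistics(request);
    } finally {
      msc.close();
    }
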
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Tue Sep  2 19:56:56 2014
@@ -65,6 +65,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -1298,4 +1299,6 @@ public interface IMetaStoreClient {
 
   public AggrStats getAggrColStatsFor(String dbName, String tblName,
       List<String> colNames, List<String> partName)  throws NoSuchObjectException, MetaException, TException;
+
+  boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException;
 }

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Tue Sep  2 19:56:56 2014
@@ -21,19 +21,18 @@ package org.apache.hadoop.hive.metastore
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.commons.lang.StringUtils.repeat;
 
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
 import java.sql.Connection;
 import java.sql.SQLException;
-import java.sql.Statement;
 import java.text.ParseException;
-import java.text.SimpleDateFormat;
 import java.util.ArrayList;
-import java.util.Date;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
 
 import javax.jdo.PersistenceManager;
 import javax.jdo.Query;
@@ -43,10 +42,13 @@ import javax.jdo.datastore.JDOConnection
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math3.stat.StatUtils;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Decimal;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
@@ -65,9 +67,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
-import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.datanucleus.store.schema.SchemaTool;
+import org.datanucleus.store.rdbms.query.ForwardQueryResult;
 
 import com.google.common.collect.Lists;
 
@@ -97,7 +98,7 @@ class MetaStoreDirectSql {
    * Whether direct SQL can be used with the current datastore backing {@link #pm}.
    */
   private final boolean isCompatibleDatastore;
-
+  
   public MetaStoreDirectSql(PersistenceManager pm) {
     this.pm = pm;
     Transaction tx = pm.currentTransaction();
@@ -893,33 +894,247 @@ class MetaStoreDirectSql {
     return result;
   }
 
-  public List<ColumnStatisticsObj> aggrColStatsForPartitions(String dbName, String tableName,
+  public AggrStats aggrColStatsForPartitions(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
-    String qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
-      + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
-      + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
-      + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\""
-      + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("
-      + makeParams(colNames.size()) + ") AND \"PARTITION_NAME\" in ("
-      + makeParams(partNames.size()) + ") group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+    long partsFound = partsFoundForPartitions(dbName, tableName, partNames,
+        colNames);
+    List<ColumnStatisticsObj> stats = columnStatisticsObjForPartitions(dbName,
+        tableName, partNames, colNames, partsFound);
+    return new AggrStats(stats, partsFound);
+  }
 
+  private long partsFoundForPartitions(String dbName, String tableName,
+      List<String> partNames, List<String> colNames) throws MetaException {
+    long partsFound = 0;
     boolean doTrace = LOG.isDebugEnabled();
+    String qText = "select count(\"COLUMN_NAME\") from \"PART_COL_STATS\""
+        + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+        + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+        + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+        + " group by \"PARTITION_NAME\"";
     long start = doTrace ? System.nanoTime() : 0;
     Query query = pm.newQuery("javax.jdo.query.SQL", qText);
-    Object qResult = query.executeWithArray(prepareParams(dbName, tableName, partNames, colNames));
-    if (qResult == null) {
-      query.closeAll();
-      return Lists.newArrayList();
-    }
-    List<Object[]> list = ensureList(qResult);
-    List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(list.size());
-    for (Object[] row : list) {
-      colStats.add(prepareCSObj(row,0));
-    }
+    Object qResult = query.executeWithArray(prepareParams(dbName, tableName,
+        partNames, colNames));
     long end = doTrace ? System.nanoTime() : 0;
     timingTrace(doTrace, qText, start, end);
-    query.closeAll();
-    return colStats;
+    ForwardQueryResult fqr = (ForwardQueryResult) qResult;
+    Iterator<?> iter = fqr.iterator();
+    while (iter.hasNext()) {
+      if (StatObjectConverter.extractSqlLong(iter.next()) == colNames.size()) {
+        partsFound++;
+      }
+    }
+    return partsFound;
+  }
+
+  private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(
+      String dbName, String tableName, List<String> partNames,
+      List<String> colNames, long partsFound) throws MetaException {
+    String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
+        + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
+        + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
+        + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\""
+        + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? ";
+    String qText = null;
+    long start = 0;
+    long end = 0;
+    Query query = null;
+    boolean doTrace = LOG.isDebugEnabled();
+    Object qResult = null;
+    ForwardQueryResult fqr = null;
+    // Check if the status of all the columns of all the partitions exists
+    // Extrapolation is not needed.
+    if (partsFound == partNames.size()) {
+      qText = commonPrefix 
+          + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+          + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+      start = doTrace ? System.nanoTime() : 0;
+      query = pm.newQuery("javax.jdo.query.SQL", qText);
+      qResult = query.executeWithArray(prepareParams(dbName, tableName,
+          partNames, colNames));      
+      if (qResult == null) {
+        query.closeAll();
+        return Lists.newArrayList();
+      }
+      end = doTrace ? System.nanoTime() : 0;
+      timingTrace(doTrace, qText, start, end);
+      List<Object[]> list = ensureList(qResult);
+      List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(
+          list.size());
+      for (Object[] row : list) {
+        colStats.add(prepareCSObj(row, 0));
+      }
+      query.closeAll();
+      return colStats;
+    } else {
+      // Extrapolation is needed for some columns.
+      // In this case, at least a column status for a partition is missing.
+      // We need to extrapolate this partition based on the other partitions
+      List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(
+          colNames.size());
+      qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") "
+          + " from \"PART_COL_STATS\""
+          + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+          + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+          + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+      start = doTrace ? System.nanoTime() : 0;
+      query = pm.newQuery("javax.jdo.query.SQL", qText);
+      qResult = query.executeWithArray(prepareParams(dbName, tableName,
+          partNames, colNames));
+      end = doTrace ? System.nanoTime() : 0;
+      timingTrace(doTrace, qText, start, end);
+      if (qResult == null) {
+        query.closeAll();
+        return Lists.newArrayList();
+      }
+      List<String> noExtraColumnNames = new ArrayList<String>();
+      Map<String, String[]> extraColumnNameTypeParts = new HashMap<String, String[]>();
+      List<Object[]> list = ensureList(qResult);
+      for (Object[] row : list) {
+        String colName = (String) row[0];
+        String colType = (String) row[1];
+        // Extrapolation is not needed for this column if
+        // count(\"PARTITION_NAME\")==partNames.size()
+        // Or, extrapolation is not possible for this column if
+        // count(\"PARTITION_NAME\")<2
+        Long count = StatObjectConverter.extractSqlLong(row[2]);
+        if (count == partNames.size() || count < 2) {
+          noExtraColumnNames.add(colName);
+        } else {
+          extraColumnNameTypeParts.put(colName, new String[] { colType, String.valueOf(count) });
+        }
+      }
+      query.closeAll();
+      // Extrapolation is not needed for columns noExtraColumnNames
+      if (noExtraColumnNames.size() != 0) {
+        qText = commonPrefix 
+            + " and \"COLUMN_NAME\" in ("+ makeParams(noExtraColumnNames.size()) + ")"
+            + " and \"PARTITION_NAME\" in ("+ makeParams(partNames.size()) +")"
+            + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+        start = doTrace ? System.nanoTime() : 0;
+        query = pm.newQuery("javax.jdo.query.SQL", qText);
+        qResult = query.executeWithArray(prepareParams(dbName, tableName,
+            partNames, noExtraColumnNames));
+        if (qResult == null) {
+          query.closeAll();
+          return Lists.newArrayList();
+        }
+        list = ensureList(qResult);
+        for (Object[] row : list) {
+          colStats.add(prepareCSObj(row, 0));
+        }
+        end = doTrace ? System.nanoTime() : 0;
+        timingTrace(doTrace, qText, start, end);
+        query.closeAll();
+      }
+      // Extrapolation is needed for extraColumnNames.
+      // give a sequence number for all the partitions
+      if (extraColumnNameTypeParts.size() != 0) {
+        Map<String, Integer> indexMap = new HashMap<String, Integer>();
+        for (int index = 0; index < partNames.size(); index++) {
+          indexMap.put(partNames.get(index), index);
+        }
+        // get sum for all columns to reduce the number of queries
+        Map<String, Map<Integer, Object>> sumMap = new HashMap<String, Map<Integer, Object>>();
+        qText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\")"
+            + " from \"PART_COL_STATS\""
+            + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+            + " and \"COLUMN_NAME\" in (" +makeParams(extraColumnNameTypeParts.size())+ ")"
+            + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+            + " group by \"COLUMN_NAME\"";
+        start = doTrace ? System.nanoTime() : 0;
+        query = pm.newQuery("javax.jdo.query.SQL", qText);
+        List<String> extraColumnNames = new ArrayList<String>();
+        extraColumnNames.addAll(extraColumnNameTypeParts.keySet());
+        qResult = query.executeWithArray(prepareParams(dbName, tableName,
+            partNames, extraColumnNames));
+        if (qResult == null) {
+          query.closeAll();
+          return Lists.newArrayList();
+        }
+        list = ensureList(qResult);
+        // see the indexes for colstats in IExtrapolatePartStatus
+        Integer[] sumIndex = new Integer[] { 6, 10, 11 };
+        for (Object[] row : list) {
+          Map<Integer, Object> indexToObject = new HashMap<Integer, Object>();
+          for (int ind = 1; ind < row.length; ind++) {
+            indexToObject.put(sumIndex[ind - 1], row[ind]);
+          }
+          sumMap.put((String) row[0], indexToObject);
+        }
+        end = doTrace ? System.nanoTime() : 0;
+        timingTrace(doTrace, qText, start, end);
+        query.closeAll();
+        for (Map.Entry<String, String[]> entry : extraColumnNameTypeParts
+            .entrySet()) {
+          Object[] row = new Object[IExtrapolatePartStatus.colStatNames.length + 2];
+          String colName = entry.getKey();
+          String colType = entry.getValue()[0];
+          Long sumVal = Long.parseLong(entry.getValue()[1]);
+          // fill in colname
+          row[0] = colName;
+          // fill in coltype
+          row[1] = colType;
+          // Use linear extrapolation; more sophisticated methods can be added in the future.
+          IExtrapolatePartStatus extrapolateMethod = new LinearExtrapolatePartStatus();
+          // fill in the column stats
+          Integer[] index = IExtrapolatePartStatus.indexMaps.get(colType
+              .toLowerCase());
+          // If colType is not a known type (long, double, etc.), fall back to the default index set.
+          if (index == null) {
+            index = IExtrapolatePartStatus.indexMaps.get("default");
+          }
+          for (int colStatIndex : index) {
+            String colStatName = IExtrapolatePartStatus.colStatNames[colStatIndex];
+            // if the aggregation type is sum, we do a scale-up
+            if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Sum) {
+              Object o = sumMap.get(colName).get(colStatIndex);
+              if (o == null) {
+                row[2 + colStatIndex] = null;
+              } else {
+                Long val = StatObjectConverter.extractSqlLong(o);
+                // Multiply before dividing so integer division does not truncate the scale-up.
+                row[2 + colStatIndex] = (Long) (val * partNames.size() / sumVal);
+              }
+            } else {
+              // if the aggregation type is min/max, we extrapolate from the
+              // left/right borders
+              qText = "select \""
+                  + colStatName
+                  + "\",\"PARTITION_NAME\" from \"PART_COL_STATS\""
+                  + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?"
+                  + " and \"COLUMN_NAME\" in (" +makeParams(1)+ ")"
+                  + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+                  + " order by \'" + colStatName + "\'";
+              start = doTrace ? System.nanoTime() : 0;
+              query = pm.newQuery("javax.jdo.query.SQL", qText);
+              qResult = query.executeWithArray(prepareParams(dbName,
+                  tableName, partNames, Arrays.asList(colName)));
+              if (qResult == null) {
+                query.closeAll();
+                return Lists.newArrayList();
+              }
+              fqr = (ForwardQueryResult) qResult;
+              Object[] min = (Object[]) (fqr.get(0));
+              Object[] max = (Object[]) (fqr.get(fqr.size() - 1));
+              end = doTrace ? System.nanoTime() : 0;
+              timingTrace(doTrace, qText, start, end);
+              query.closeAll();
+              if (min[0] == null || max[0] == null) {
+                row[2 + colStatIndex] = null;
+              } else {
+                row[2 + colStatIndex] = extrapolateMethod.extrapolate(min, max,
+                    colStatIndex, indexMap);
+              }
+            }
+          }
+          colStats.add(prepareCSObj(row, 0));
+        }
+      }
+      return colStats;
+    }
   }
 
   private ColumnStatisticsObj prepareCSObj (Object[] row, int i) throws MetaException {
@@ -949,7 +1164,7 @@ class MetaStoreDirectSql {
 
     return params;
   }
-
+  
   public List<ColumnStatistics> getPartitionStats(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
     if (colNames.isEmpty() || partNames.isEmpty()) {

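A minimal standalone sketch of the Sum-type scale-up applied in the
extrapolation code above, assuming the same rule (a sum observed over the
partitions that have stats, scaled to the full partition count); the class
name and the numbers are illustrative, not part of this commit:

    public final class SumScaleUpSketch {
      // Scale a sum observed over partsWithStats partitions up to totalParts.
      static long scaleUp(long observedSum, long partsWithStats, long totalParts) {
        // Multiply before dividing to avoid integer-division truncation.
        return observedSum * totalParts / partsWithStats;
      }

      public static void main(String[] args) {
        // e.g. NUM_NULLS summing to 50 over 8 of 10 partitions extrapolates to 62.
        System.out.println(scaleUp(50L, 8L, 10L));
      }
    }
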
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Tue Sep  2 19:56:56 2014
@@ -45,9 +45,11 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -163,19 +165,25 @@ public class MetaStoreUtils {
     return updateUnpartitionedTableStatsFast(db, tbl, wh, madeDir, false);
   }
 
+  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
+      boolean madeDir, boolean forceRecompute) throws MetaException {
+    return updateUnpartitionedTableStatsFast(tbl,
+        wh.getFileStatusesForUnpartitionedTable(db, tbl), madeDir, forceRecompute);
+  }
+
   /**
-   * Updates the numFiles and totalSize parameters for the passed unpartitioned Table by querying
-   * the warehouse if the passed Table does not already have values for these parameters.
+   * Updates the numFiles and totalSize parameters for the passed unpartitioned Table from the
+   * given file statuses, if the passed Table does not already have values for these parameters.
-   * @param db
    * @param tbl
-   * @param wh
+   * @param fileStatus
    * @param newDir if true, the directory was just created and can be assumed to be empty
    * @param forceRecompute Recompute stats even if the passed Table already has
    * these parameters set
    * @return true if the stats were updated, false otherwise
    */
-  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
-      boolean newDir, boolean forceRecompute) throws MetaException {
+  public static boolean updateUnpartitionedTableStatsFast(Table tbl,
+      FileStatus[] fileStatus, boolean newDir, boolean forceRecompute) throws MetaException {
+
     Map<String,String> params = tbl.getParameters();
     boolean updated = false;
     if (forceRecompute ||
@@ -188,7 +196,6 @@ public class MetaStoreUtils {
         // The table location already exists and may contain data.
         // Let's try to populate those stats that don't require full scan.
         LOG.info("Updating table stats fast for " + tbl.getTableName());
-        FileStatus[] fileStatus = wh.getFileStatusesForUnpartitionedTable(db, tbl);
         populateQuickStats(fileStatus, params);
         LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
         if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
@@ -1043,11 +1050,17 @@ public class MetaStoreUtils {
 
   public static void startMetaStore(final int port,
       final HadoopThriftAuthBridge bridge) throws Exception {
+    startMetaStore(port, bridge, new HiveConf(HMSHandler.class));
+  }
+
+  public static void startMetaStore(final int port,
+      final HadoopThriftAuthBridge bridge, final HiveConf hiveConf)
+      throws Exception {
     Thread thread = new Thread(new Runnable() {
       @Override
       public void run() {
         try {
-          HiveMetaStore.startMetaStore(port, bridge);
+          HiveMetaStore.startMetaStore(port, bridge, hiveConf);
         } catch (Throwable e) {
           LOG.error("Metastore Thrift Server threw an exception...",e);
         }
@@ -1057,6 +1070,7 @@ public class MetaStoreUtils {
     thread.start();
     loopUntilHMSReady(port);
   }
+
   /**
    * A simple connect test to make sure that the metastore is up
    * @throws Exception

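A hedged usage sketch of the two entry points added above; the helper class
and the false/false flags are hypothetical, only the called methods and their
signatures come from this commit:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;

    public class MetaStoreUtilsSketch {
      // Start a metastore thread with an explicit HiveConf instead of the
      // implicit one the old two-argument overload constructed.
      static void start(int port, HadoopThriftAuthBridge bridge) throws Exception {
        HiveConf conf = new HiveConf(HMSHandler.class);
        MetaStoreUtils.startMetaStore(port, bridge, conf);
      }

      // The stats helper now accepts a precomputed FileStatus[], so callers
      // can reuse a listing instead of handing over db/wh.
      static boolean updateStats(Database db, Table tbl, Warehouse wh) throws MetaException {
        FileStatus[] statuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
        return MetaStoreUtils.updateUnpartitionedTableStatsFast(tbl, statuses, false, false);
      }
    }
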
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Tue Sep  2 19:56:56 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore
 
 import static org.apache.commons.lang.StringUtils.join;
 
+import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -34,6 +35,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -60,6 +62,7 @@ import org.apache.hadoop.hive.common.cla
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -87,6 +90,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -128,6 +132,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
@@ -156,7 +161,7 @@ public class ObjectStore implements RawS
 
   private static final Map<String, Class> PINCLASSMAP;
   static {
-    Map<String, Class> map = new HashMap();
+    Map<String, Class> map = new HashMap<String, Class>();
     map.put("table", MTable.class);
     map.put("storagedescriptor", MStorageDescriptor.class);
     map.put("serdeinfo", MSerDeInfo.class);
@@ -251,6 +256,8 @@ public class ObjectStore implements RawS
       expressionProxy = createExpressionProxy(hiveConf);
       directSql = new MetaStoreDirectSql(pm);
     }
+    LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm +
+        " created in the thread with id: " + Thread.currentThread().getId());
   }
 
   /**
@@ -294,6 +301,16 @@ public class ObjectStore implements RawS
         }
       }
     }
+    // Password may no longer be in the conf, use getPassword()
+    try {
+      String passwd =
+          ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
+      if (passwd != null && !passwd.isEmpty()) {
+        prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd);
+      }
+    } catch (IOException err) {
+      throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
+    }
 
     if (LOG.isDebugEnabled()) {
       for (Entry<Object, Object> e : prop.entrySet()) {
@@ -342,6 +359,8 @@ public class ObjectStore implements RawS
   @Override
   public void shutdown() {
     if (pm != null) {
+      LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm +
+          " will be shutdown");
       pm.close();
     }
   }
@@ -1063,14 +1082,14 @@ public class ObjectStore implements RawS
     return keys;
   }
 
-  private SerDeInfo converToSerDeInfo(MSerDeInfo ms) throws MetaException {
+  private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
     return new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
   }
 
-  private MSerDeInfo converToMSerDeInfo(SerDeInfo ms) throws MetaException {
+  private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
@@ -1102,7 +1121,7 @@ public class ObjectStore implements RawS
 
     StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas),
         msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
-        .isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd
+        .isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd
         .getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd
         .getSortCols()), convertMap(msd.getParameters()));
     SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
@@ -1214,7 +1233,7 @@ public class ObjectStore implements RawS
     }
     return new MStorageDescriptor(mcd, sd
         .getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd
-        .isCompressed(), sd.getNumBuckets(), converToMSerDeInfo(sd
+        .isCompressed(), sd.getNumBuckets(), convertToMSerDeInfo(sd
         .getSerdeInfo()), sd.getBucketCols(),
         convertToMOrders(sd.getSortCols()), sd.getParameters(),
         (null == sd.getSkewedInfo()) ? null
@@ -2377,7 +2396,7 @@ public class ObjectStore implements RawS
   * Makes a JDO query filter string for tables or partitions.
    * @param dbName Database name.
-   * @param table Table. If null, the query returned is over tables in a database.
+   * @param mtable Table. If null, the query returned is over tables in a database.
    *   If not null, the query returned is over partitions in a table.
    * @param filter The filter from which JDOQL filter will be made.
    * @param params Parameters for the filter. Some parameters may be added here.
@@ -5699,7 +5718,7 @@ public class ObjectStore implements RawS
       pm.makePersistent(mStatsObj);
     }
   }
-
+  
   @Override
   public boolean updateTableColumnStatistics(ColumnStatistics colStats)
     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -5760,6 +5779,34 @@ public class ObjectStore implements RawS
     }
   }
 
+  @Override
+  public boolean updatePartitionColumnStatistics(SetPartitionsStatsRequest request)
+      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      for (ColumnStatistics colStats : request.getColStats()) {
+        ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+        statsDesc.setDbName(statsDesc.getDbName().toLowerCase());
+        statsDesc.setTableName(statsDesc.getTableName().toLowerCase());
+        List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+        for (ColumnStatisticsObj statsObj : statsObjs) {
+          statsObj.setColName(statsObj.getColName().toLowerCase());
+          statsObj.setColType(statsObj.getColType().toLowerCase());
+          MPartitionColumnStatistics mStatsObj = StatObjectConverter
+              .convertToMPartitionColumnStatistics(null, statsDesc, statsObj);
+          pm.makePersistent(mStatsObj);
+        }
+      }
+      committed = commitTransaction();
+      return committed;
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+      }
+    }
+  }
+
   private List<MTableColumnStatistics> getMTableColumnStatistics(
       Table table, List<String> colNames) throws MetaException {
     boolean committed = false;
@@ -5904,25 +5951,28 @@ public class ObjectStore implements RawS
 
 
   @Override
-  public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName, String tblName,
+  public AggrStats get_aggr_stats_for(String dbName, String tblName,
       final List<String> partNames, final List<String> colNames) throws MetaException, NoSuchObjectException {
-
-    return new GetListHelper<ColumnStatisticsObj>(dbName, tblName, true, false) {
+    return new GetHelper<AggrStats>(dbName, tblName, true, false) {
       @Override
-      protected List<ColumnStatisticsObj> getSqlResult(
-          GetHelper<List<ColumnStatisticsObj>> ctx) throws MetaException {
-        return directSql.aggrColStatsForPartitions(dbName, tblName, partNames, colNames);
+      protected AggrStats getSqlResult(GetHelper<AggrStats> ctx)
+          throws MetaException {
+        return directSql.aggrColStatsForPartitions(dbName, tblName, partNames,
+            colNames);
       }
-
       @Override
-      protected List<ColumnStatisticsObj> getJdoResult(
-          GetHelper<List<ColumnStatisticsObj>> ctx) throws MetaException,
-          NoSuchObjectException {
-        // This is fast path for query optimizations, if we can find this info quickly using
+      protected AggrStats getJdoResult(GetHelper<AggrStats> ctx)
+          throws MetaException, NoSuchObjectException {
+        // This is the fast path for query optimizations: if we can find this info quickly using
         // directSql, do it. No point in failing back to slow path here.
         throw new MetaException("Jdo path is not implemented for stats aggr.");
       }
-      }.run(true);
+      @Override
+      protected String describeResult() {
+        return null;
+      }
+    }.run(true);
   }
 
   private List<MPartitionColumnStatistics> getMPartitionColumnStatistics(
@@ -6158,7 +6208,7 @@ public class ObjectStore implements RawS
     boolean commited = false;
     long delCnt;
     LOG.debug("Begin executing cleanupEvents");
-    Long expiryTime = HiveConf.getLongVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION) * 1000L;
+    Long expiryTime = HiveConf.getTimeVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION, TimeUnit.MILLISECONDS);
     Long curTime = System.currentTimeMillis();
     try {
       openTransaction();

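A hedged sketch, not part of this commit, of assembling the request consumed
by the new updatePartitionColumnStatistics(SetPartitionsStatsRequest) above;
the database, table, partition, and stats values are made up:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    static void setStatsSketch(RawStore store) throws Exception {
      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, "default", "sales");
      desc.setPartName("ds=2014-09-02");
      ColumnStatisticsData data = new ColumnStatisticsData();
      data.setLongStats(new LongColumnStatsData(0L, 42L)); // numNulls, numDVs
      ColumnStatisticsObj obj = new ColumnStatisticsObj("amount", "bigint", data);
      ColumnStatistics colStats = new ColumnStatistics(desc, Arrays.asList(obj));
      // One ColumnStatistics per partition; the store lower-cases names itself.
      store.updatePartitionColumnStatistics(new SetPartitionsStatsRequest(Arrays.asList(colStats)));
    }
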
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Tue Sep  2 19:56:56 2014
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -548,6 +550,10 @@ public interface RawStore extends Config
    */
   public List<String> getFunctions(String dbName, String pattern) throws MetaException;
 
-  public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName, String tblName,
+  public AggrStats get_aggr_stats_for(String dbName, String tblName,
     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+
+  boolean updatePartitionColumnStatistics(
+      SetPartitionsStatsRequest request) throws NoSuchObjectException,
+      MetaException, InvalidObjectException, InvalidInputException;
 }

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java Tue Sep  2 19:56:56 2014
@@ -23,6 +23,7 @@ import java.lang.reflect.InvocationTarge
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
@@ -80,8 +81,8 @@ public class RetryingHMSHandler implemen
     boolean gotNewConnectUrl = false;
     boolean reloadConf = HiveConf.getBoolVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
-    int retryInterval = HiveConf.getIntVar(hiveConf,
-        HiveConf.ConfVars.HMSHANDLERINTERVAL);
+    long retryInterval = HiveConf.getTimeVar(hiveConf,
+        HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
     int retryLimit = HiveConf.getIntVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 

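A minimal sketch of the time-unit-aware read that replaces the plain int read
above; the helper method and the sleep are illustrative, only the getTimeVar
call is from this commit:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    static void backOff(HiveConf hiveConf) throws InterruptedException {
      long retryIntervalMs = HiveConf.getTimeVar(hiveConf,
          HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
      Thread.sleep(retryIntervalMs); // back off between handler retry attempts
    }
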
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java Tue Sep  2 19:56:56 2014
@@ -24,6 +24,7 @@ import java.lang.reflect.InvocationTarge
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,18 +49,18 @@ public class RetryingMetaStoreClient imp
 
   private final IMetaStoreClient base;
   private final int retryLimit;
-  private final int retryDelaySeconds;
+  private final long retryDelaySeconds;
 
 
 
   protected RetryingMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
       Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
     this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
-    this.retryDelaySeconds =
-        hiveConf.getIntVar(HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY);
+    this.retryDelaySeconds = hiveConf.getTimeVar(
+        HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
 
     reloginExpiringKeytabUser();
-    this.base = (IMetaStoreClient) MetaStoreUtils.newInstance(msClientClass, new Class[] {
+    this.base = MetaStoreUtils.newInstance(msClientClass, new Class[] {
         HiveConf.class, HiveMetaHookLoader.class}, new Object[] {hiveConf, hookLoader});
   }
 

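A hedged sketch of the retry pattern the retryLimit and retryDelaySeconds
fields above drive; the loop shape is illustrative, not the class's actual
invoke() logic:

    import java.lang.reflect.Method;
    import java.util.concurrent.TimeUnit;

    static Object callWithRetry(Method method, Object target, Object[] args,
        int retryLimit, long retryDelaySeconds) throws Exception {
      for (int attempt = 0; ; attempt++) {
        try {
          return method.invoke(target, args);
        } catch (Exception e) {
          if (attempt >= retryLimit) {
            throw e;
          }
          // The delay is read via getTimeVar(..., TimeUnit.SECONDS) above.
          TimeUnit.SECONDS.sleep(retryDelaySeconds);
        }
      }
    }
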
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java Tue Sep  2 19:56:56 2014
@@ -65,20 +65,20 @@ public class StatObjectConverter {
      if (statsObj.getStatsData().isSetBooleanStats()) {
        BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
        mColStats.setBooleanStats(
-           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null,
            boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
            boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
      } else if (statsObj.getStatsData().isSetLongStats()) {
        LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
        mColStats.setLongStats(
-           longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+           longStats.isSetNumNulls() ? longStats.getNumNulls() : null,
            longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
            longStats.isSetLowValue() ? longStats.getLowValue() : null,
            longStats.isSetHighValue() ? longStats.getHighValue() : null);
      } else if (statsObj.getStatsData().isSetDoubleStats()) {
        DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
        mColStats.setDoubleStats(
-           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null,
            doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
            doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
            doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
@@ -87,20 +87,20 @@ public class StatObjectConverter {
        String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
        String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
        mColStats.setDecimalStats(
-           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
-           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null,
+           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null,
                low, high);
      } else if (statsObj.getStatsData().isSetStringStats()) {
        StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
        mColStats.setStringStats(
-           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null,
            stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
-           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
            stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
      } else if (statsObj.getStatsData().isSetBinaryStats()) {
        BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
        mColStats.setBinaryStats(
-           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null,
            binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
            binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
      }
@@ -109,9 +109,9 @@ public class StatObjectConverter {
 
   public static void setFieldsIntoOldStats(
       MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStatsObj) {
-	  if (mStatsObj.getAvgColLen() != null) {
-	    oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-	  }
+    if (mStatsObj.getAvgColLen() != null) {
+      oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
+    }
     if (mStatsObj.getLongHighValue() != null) {
       oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
     }
@@ -131,19 +131,19 @@ public class StatObjectConverter {
       oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue());
     }
     if (mStatsObj.getMaxColLen() != null) {
-  	  oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
+      oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
     }
     if (mStatsObj.getNumDVs() != null) {
-  	  oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
+      oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
     }
     if (mStatsObj.getNumFalses() != null) {
-  	  oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
+      oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
     }
     if (mStatsObj.getNumTrues() != null) {
-  	  oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
+      oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
     }
     if (mStatsObj.getNumNulls() != null) {
-  	  oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+      oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
     }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
   }
@@ -152,13 +152,13 @@ public class StatObjectConverter {
       MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics oldStatsObj) {
     if (mStatsObj.getAvgColLen() != null) {
           oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-	  }
+    }
     if (mStatsObj.getLongHighValue() != null) {
-		  oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
-	  }
-	  if (mStatsObj.getDoubleHighValue() != null) {
-		  oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
-	  }
+      oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
+    }
+    if (mStatsObj.getDoubleHighValue() != null) {
+      oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
+    }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
     if (mStatsObj.getLongLowValue() != null) {
       oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue());
@@ -292,20 +292,20 @@ public class StatObjectConverter {
     if (statsObj.getStatsData().isSetBooleanStats()) {
       BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
       mColStats.setBooleanStats(
-          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null,
           boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
           boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
     } else if (statsObj.getStatsData().isSetLongStats()) {
       LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
       mColStats.setLongStats(
-          longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+          longStats.isSetNumNulls() ? longStats.getNumNulls() : null,
           longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
           longStats.isSetLowValue() ? longStats.getLowValue() : null,
           longStats.isSetHighValue() ? longStats.getHighValue() : null);
     } else if (statsObj.getStatsData().isSetDoubleStats()) {
       DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
       mColStats.setDoubleStats(
-          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null,
           doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
           doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
           doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
@@ -314,20 +314,20 @@ public class StatObjectConverter {
       String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
       String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
       mColStats.setDecimalStats(
-          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
-          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null,
+          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null,
               low, high);
     } else if (statsObj.getStatsData().isSetStringStats()) {
       StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
       mColStats.setStringStats(
-          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null,
           stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
-          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
           stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
     } else if (statsObj.getStatsData().isSetBinaryStats()) {
       BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
       mColStats.setBinaryStats(
-          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null,
           binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
           binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
     }