Posted to commits@hive.apache.org by br...@apache.org on 2014/08/24 05:43:57 UTC

svn commit: r1620103 [11/27] - in /hive/branches/spark: ./ accumulo-handler/ common/src/java/org/apache/hadoop/hive/ant/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/test/org/apache/hadoop/hive/common/type/ data/files/ hcatalog/stream...

Modified: hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py Sun Aug 24 03:43:48 2014
@@ -4233,6 +4233,77 @@ class AggrStats:
   def __ne__(self, other):
     return not (self == other)
 
+class SetPartitionsStatsRequest:
+  """
+  Attributes:
+   - colStats
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1
+  )
+
+  def __init__(self, colStats=None,):
+    self.colStats = colStats
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.colStats = []
+          (_etype226, _size223) = iprot.readListBegin()
+          for _i227 in xrange(_size223):
+            _elem228 = ColumnStatistics()
+            _elem228.read(iprot)
+            self.colStats.append(_elem228)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('SetPartitionsStatsRequest')
+    if self.colStats is not None:
+      oprot.writeFieldBegin('colStats', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.colStats))
+      for iter229 in self.colStats:
+        iter229.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.colStats is None:
+      raise TProtocol.TProtocolException(message='Required field colStats is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class Schema:
   """
   Attributes:
@@ -4262,22 +4333,22 @@ class Schema:
       if fid == 1:
         if ftype == TType.LIST:
           self.fieldSchemas = []
-          (_etype226, _size223) = iprot.readListBegin()
-          for _i227 in xrange(_size223):
-            _elem228 = FieldSchema()
-            _elem228.read(iprot)
-            self.fieldSchemas.append(_elem228)
+          (_etype233, _size230) = iprot.readListBegin()
+          for _i234 in xrange(_size230):
+            _elem235 = FieldSchema()
+            _elem235.read(iprot)
+            self.fieldSchemas.append(_elem235)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype230, _vtype231, _size229 ) = iprot.readMapBegin() 
-          for _i233 in xrange(_size229):
-            _key234 = iprot.readString();
-            _val235 = iprot.readString();
-            self.properties[_key234] = _val235
+          (_ktype237, _vtype238, _size236 ) = iprot.readMapBegin() 
+          for _i240 in xrange(_size236):
+            _key241 = iprot.readString();
+            _val242 = iprot.readString();
+            self.properties[_key241] = _val242
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4294,16 +4365,16 @@ class Schema:
     if self.fieldSchemas is not None:
       oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas))
-      for iter236 in self.fieldSchemas:
-        iter236.write(oprot)
+      for iter243 in self.fieldSchemas:
+        iter243.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter237,viter238 in self.properties.items():
-        oprot.writeString(kiter237)
-        oprot.writeString(viter238)
+      for kiter244,viter245 in self.properties.items():
+        oprot.writeString(kiter244)
+        oprot.writeString(viter245)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4350,11 +4421,11 @@ class EnvironmentContext:
       if fid == 1:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype240, _vtype241, _size239 ) = iprot.readMapBegin() 
-          for _i243 in xrange(_size239):
-            _key244 = iprot.readString();
-            _val245 = iprot.readString();
-            self.properties[_key244] = _val245
+          (_ktype247, _vtype248, _size246 ) = iprot.readMapBegin() 
+          for _i250 in xrange(_size246):
+            _key251 = iprot.readString();
+            _val252 = iprot.readString();
+            self.properties[_key251] = _val252
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4371,9 +4442,9 @@ class EnvironmentContext:
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter246,viter247 in self.properties.items():
-        oprot.writeString(kiter246)
-        oprot.writeString(viter247)
+      for kiter253,viter254 in self.properties.items():
+        oprot.writeString(kiter253)
+        oprot.writeString(viter254)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4423,11 +4494,11 @@ class PartitionsByExprResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype251, _size248) = iprot.readListBegin()
-          for _i252 in xrange(_size248):
-            _elem253 = Partition()
-            _elem253.read(iprot)
-            self.partitions.append(_elem253)
+          (_etype258, _size255) = iprot.readListBegin()
+          for _i259 in xrange(_size255):
+            _elem260 = Partition()
+            _elem260.read(iprot)
+            self.partitions.append(_elem260)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4449,8 +4520,8 @@ class PartitionsByExprResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter254 in self.partitions:
-        iter254.write(oprot)
+      for iter261 in self.partitions:
+        iter261.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.hasUnknownPartitions is not None:
@@ -4619,11 +4690,11 @@ class TableStatsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.tableStats = []
-          (_etype258, _size255) = iprot.readListBegin()
-          for _i259 in xrange(_size255):
-            _elem260 = ColumnStatisticsObj()
-            _elem260.read(iprot)
-            self.tableStats.append(_elem260)
+          (_etype265, _size262) = iprot.readListBegin()
+          for _i266 in xrange(_size262):
+            _elem267 = ColumnStatisticsObj()
+            _elem267.read(iprot)
+            self.tableStats.append(_elem267)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4640,8 +4711,8 @@ class TableStatsResult:
     if self.tableStats is not None:
       oprot.writeFieldBegin('tableStats', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.tableStats))
-      for iter261 in self.tableStats:
-        iter261.write(oprot)
+      for iter268 in self.tableStats:
+        iter268.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4690,17 +4761,17 @@ class PartitionsStatsResult:
       if fid == 1:
         if ftype == TType.MAP:
           self.partStats = {}
-          (_ktype263, _vtype264, _size262 ) = iprot.readMapBegin() 
-          for _i266 in xrange(_size262):
-            _key267 = iprot.readString();
-            _val268 = []
-            (_etype272, _size269) = iprot.readListBegin()
-            for _i273 in xrange(_size269):
-              _elem274 = ColumnStatisticsObj()
-              _elem274.read(iprot)
-              _val268.append(_elem274)
+          (_ktype270, _vtype271, _size269 ) = iprot.readMapBegin() 
+          for _i273 in xrange(_size269):
+            _key274 = iprot.readString();
+            _val275 = []
+            (_etype279, _size276) = iprot.readListBegin()
+            for _i280 in xrange(_size276):
+              _elem281 = ColumnStatisticsObj()
+              _elem281.read(iprot)
+              _val275.append(_elem281)
             iprot.readListEnd()
-            self.partStats[_key267] = _val268
+            self.partStats[_key274] = _val275
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4717,11 +4788,11 @@ class PartitionsStatsResult:
     if self.partStats is not None:
       oprot.writeFieldBegin('partStats', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats))
-      for kiter275,viter276 in self.partStats.items():
-        oprot.writeString(kiter275)
-        oprot.writeListBegin(TType.STRUCT, len(viter276))
-        for iter277 in viter276:
-          iter277.write(oprot)
+      for kiter282,viter283 in self.partStats.items():
+        oprot.writeString(kiter282)
+        oprot.writeListBegin(TType.STRUCT, len(viter283))
+        for iter284 in viter283:
+          iter284.write(oprot)
         oprot.writeListEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
@@ -4787,10 +4858,10 @@ class TableStatsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.colNames = []
-          (_etype281, _size278) = iprot.readListBegin()
-          for _i282 in xrange(_size278):
-            _elem283 = iprot.readString();
-            self.colNames.append(_elem283)
+          (_etype288, _size285) = iprot.readListBegin()
+          for _i289 in xrange(_size285):
+            _elem290 = iprot.readString();
+            self.colNames.append(_elem290)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4815,8 +4886,8 @@ class TableStatsRequest:
     if self.colNames is not None:
       oprot.writeFieldBegin('colNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.colNames))
-      for iter284 in self.colNames:
-        oprot.writeString(iter284)
+      for iter291 in self.colNames:
+        oprot.writeString(iter291)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4888,20 +4959,20 @@ class PartitionsStatsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.colNames = []
-          (_etype288, _size285) = iprot.readListBegin()
-          for _i289 in xrange(_size285):
-            _elem290 = iprot.readString();
-            self.colNames.append(_elem290)
+          (_etype295, _size292) = iprot.readListBegin()
+          for _i296 in xrange(_size292):
+            _elem297 = iprot.readString();
+            self.colNames.append(_elem297)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype294, _size291) = iprot.readListBegin()
-          for _i295 in xrange(_size291):
-            _elem296 = iprot.readString();
-            self.partNames.append(_elem296)
+          (_etype301, _size298) = iprot.readListBegin()
+          for _i302 in xrange(_size298):
+            _elem303 = iprot.readString();
+            self.partNames.append(_elem303)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4926,15 +4997,15 @@ class PartitionsStatsRequest:
     if self.colNames is not None:
       oprot.writeFieldBegin('colNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.colNames))
-      for iter297 in self.colNames:
-        oprot.writeString(iter297)
+      for iter304 in self.colNames:
+        oprot.writeString(iter304)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 4)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter298 in self.partNames:
-        oprot.writeString(iter298)
+      for iter305 in self.partNames:
+        oprot.writeString(iter305)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4989,11 +5060,11 @@ class AddPartitionsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype302, _size299) = iprot.readListBegin()
-          for _i303 in xrange(_size299):
-            _elem304 = Partition()
-            _elem304.read(iprot)
-            self.partitions.append(_elem304)
+          (_etype309, _size306) = iprot.readListBegin()
+          for _i310 in xrange(_size306):
+            _elem311 = Partition()
+            _elem311.read(iprot)
+            self.partitions.append(_elem311)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5010,8 +5081,8 @@ class AddPartitionsResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter305 in self.partitions:
-        iter305.write(oprot)
+      for iter312 in self.partitions:
+        iter312.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5080,11 +5151,11 @@ class AddPartitionsRequest:
       elif fid == 3:
         if ftype == TType.LIST:
           self.parts = []
-          (_etype309, _size306) = iprot.readListBegin()
-          for _i310 in xrange(_size306):
-            _elem311 = Partition()
-            _elem311.read(iprot)
-            self.parts.append(_elem311)
+          (_etype316, _size313) = iprot.readListBegin()
+          for _i317 in xrange(_size313):
+            _elem318 = Partition()
+            _elem318.read(iprot)
+            self.parts.append(_elem318)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5119,8 +5190,8 @@ class AddPartitionsRequest:
     if self.parts is not None:
       oprot.writeFieldBegin('parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.parts))
-      for iter312 in self.parts:
-        iter312.write(oprot)
+      for iter319 in self.parts:
+        iter319.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.ifNotExists is not None:
@@ -5183,11 +5254,11 @@ class DropPartitionsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.partitions = []
-          (_etype316, _size313) = iprot.readListBegin()
-          for _i317 in xrange(_size313):
-            _elem318 = Partition()
-            _elem318.read(iprot)
-            self.partitions.append(_elem318)
+          (_etype323, _size320) = iprot.readListBegin()
+          for _i324 in xrange(_size320):
+            _elem325 = Partition()
+            _elem325.read(iprot)
+            self.partitions.append(_elem325)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5204,8 +5275,8 @@ class DropPartitionsResult:
     if self.partitions is not None:
       oprot.writeFieldBegin('partitions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-      for iter319 in self.partitions:
-        iter319.write(oprot)
+      for iter326 in self.partitions:
+        iter326.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5329,21 +5400,21 @@ class RequestPartsSpec:
       if fid == 1:
         if ftype == TType.LIST:
           self.names = []
-          (_etype323, _size320) = iprot.readListBegin()
-          for _i324 in xrange(_size320):
-            _elem325 = iprot.readString();
-            self.names.append(_elem325)
+          (_etype330, _size327) = iprot.readListBegin()
+          for _i331 in xrange(_size327):
+            _elem332 = iprot.readString();
+            self.names.append(_elem332)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.LIST:
           self.exprs = []
-          (_etype329, _size326) = iprot.readListBegin()
-          for _i330 in xrange(_size326):
-            _elem331 = DropPartitionsExpr()
-            _elem331.read(iprot)
-            self.exprs.append(_elem331)
+          (_etype336, _size333) = iprot.readListBegin()
+          for _i337 in xrange(_size333):
+            _elem338 = DropPartitionsExpr()
+            _elem338.read(iprot)
+            self.exprs.append(_elem338)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5360,15 +5431,15 @@ class RequestPartsSpec:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter332 in self.names:
-        oprot.writeString(iter332)
+      for iter339 in self.names:
+        oprot.writeString(iter339)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.exprs is not None:
       oprot.writeFieldBegin('exprs', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.exprs))
-      for iter333 in self.exprs:
-        iter333.write(oprot)
+      for iter340 in self.exprs:
+        iter340.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5695,11 +5766,11 @@ class Function:
       elif fid == 8:
         if ftype == TType.LIST:
           self.resourceUris = []
-          (_etype337, _size334) = iprot.readListBegin()
-          for _i338 in xrange(_size334):
-            _elem339 = ResourceUri()
-            _elem339.read(iprot)
-            self.resourceUris.append(_elem339)
+          (_etype344, _size341) = iprot.readListBegin()
+          for _i345 in xrange(_size341):
+            _elem346 = ResourceUri()
+            _elem346.read(iprot)
+            self.resourceUris.append(_elem346)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5744,8 +5815,8 @@ class Function:
     if self.resourceUris is not None:
       oprot.writeFieldBegin('resourceUris', TType.LIST, 8)
       oprot.writeListBegin(TType.STRUCT, len(self.resourceUris))
-      for iter340 in self.resourceUris:
-        iter340.write(oprot)
+      for iter347 in self.resourceUris:
+        iter347.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5904,11 +5975,11 @@ class GetOpenTxnsInfoResponse:
       elif fid == 2:
         if ftype == TType.LIST:
           self.open_txns = []
-          (_etype344, _size341) = iprot.readListBegin()
-          for _i345 in xrange(_size341):
-            _elem346 = TxnInfo()
-            _elem346.read(iprot)
-            self.open_txns.append(_elem346)
+          (_etype351, _size348) = iprot.readListBegin()
+          for _i352 in xrange(_size348):
+            _elem353 = TxnInfo()
+            _elem353.read(iprot)
+            self.open_txns.append(_elem353)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5929,8 +6000,8 @@ class GetOpenTxnsInfoResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.open_txns))
-      for iter347 in self.open_txns:
-        iter347.write(oprot)
+      for iter354 in self.open_txns:
+        iter354.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5989,10 +6060,10 @@ class GetOpenTxnsResponse:
       elif fid == 2:
         if ftype == TType.SET:
           self.open_txns = set()
-          (_etype351, _size348) = iprot.readSetBegin()
-          for _i352 in xrange(_size348):
-            _elem353 = iprot.readI64();
-            self.open_txns.add(_elem353)
+          (_etype358, _size355) = iprot.readSetBegin()
+          for _i359 in xrange(_size355):
+            _elem360 = iprot.readI64();
+            self.open_txns.add(_elem360)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -6013,8 +6084,8 @@ class GetOpenTxnsResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.open_txns))
-      for iter354 in self.open_txns:
-        oprot.writeI64(iter354)
+      for iter361 in self.open_txns:
+        oprot.writeI64(iter361)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6155,10 +6226,10 @@ class OpenTxnsResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.txn_ids = []
-          (_etype358, _size355) = iprot.readListBegin()
-          for _i359 in xrange(_size355):
-            _elem360 = iprot.readI64();
-            self.txn_ids.append(_elem360)
+          (_etype365, _size362) = iprot.readListBegin()
+          for _i366 in xrange(_size362):
+            _elem367 = iprot.readI64();
+            self.txn_ids.append(_elem367)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6175,8 +6246,8 @@ class OpenTxnsResponse:
     if self.txn_ids is not None:
       oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.txn_ids))
-      for iter361 in self.txn_ids:
-        oprot.writeI64(iter361)
+      for iter368 in self.txn_ids:
+        oprot.writeI64(iter368)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6472,11 +6543,11 @@ class LockRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.component = []
-          (_etype365, _size362) = iprot.readListBegin()
-          for _i366 in xrange(_size362):
-            _elem367 = LockComponent()
-            _elem367.read(iprot)
-            self.component.append(_elem367)
+          (_etype372, _size369) = iprot.readListBegin()
+          for _i373 in xrange(_size369):
+            _elem374 = LockComponent()
+            _elem374.read(iprot)
+            self.component.append(_elem374)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6508,8 +6579,8 @@ class LockRequest:
     if self.component is not None:
       oprot.writeFieldBegin('component', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.component))
-      for iter368 in self.component:
-        iter368.write(oprot)
+      for iter375 in self.component:
+        iter375.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.txnid is not None:
@@ -7010,11 +7081,11 @@ class ShowLocksResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.locks = []
-          (_etype372, _size369) = iprot.readListBegin()
-          for _i373 in xrange(_size369):
-            _elem374 = ShowLocksResponseElement()
-            _elem374.read(iprot)
-            self.locks.append(_elem374)
+          (_etype379, _size376) = iprot.readListBegin()
+          for _i380 in xrange(_size376):
+            _elem381 = ShowLocksResponseElement()
+            _elem381.read(iprot)
+            self.locks.append(_elem381)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7031,8 +7102,8 @@ class ShowLocksResponse:
     if self.locks is not None:
       oprot.writeFieldBegin('locks', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.locks))
-      for iter375 in self.locks:
-        iter375.write(oprot)
+      for iter382 in self.locks:
+        iter382.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7230,20 +7301,20 @@ class HeartbeatTxnRangeResponse:
       if fid == 1:
         if ftype == TType.SET:
           self.aborted = set()
-          (_etype379, _size376) = iprot.readSetBegin()
-          for _i380 in xrange(_size376):
-            _elem381 = iprot.readI64();
-            self.aborted.add(_elem381)
+          (_etype386, _size383) = iprot.readSetBegin()
+          for _i387 in xrange(_size383):
+            _elem388 = iprot.readI64();
+            self.aborted.add(_elem388)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.SET:
           self.nosuch = set()
-          (_etype385, _size382) = iprot.readSetBegin()
-          for _i386 in xrange(_size382):
-            _elem387 = iprot.readI64();
-            self.nosuch.add(_elem387)
+          (_etype392, _size389) = iprot.readSetBegin()
+          for _i393 in xrange(_size389):
+            _elem394 = iprot.readI64();
+            self.nosuch.add(_elem394)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -7260,15 +7331,15 @@ class HeartbeatTxnRangeResponse:
     if self.aborted is not None:
       oprot.writeFieldBegin('aborted', TType.SET, 1)
       oprot.writeSetBegin(TType.I64, len(self.aborted))
-      for iter388 in self.aborted:
-        oprot.writeI64(iter388)
+      for iter395 in self.aborted:
+        oprot.writeI64(iter395)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     if self.nosuch is not None:
       oprot.writeFieldBegin('nosuch', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.nosuch))
-      for iter389 in self.nosuch:
-        oprot.writeI64(iter389)
+      for iter396 in self.nosuch:
+        oprot.writeI64(iter396)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7635,11 +7706,11 @@ class ShowCompactResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.compacts = []
-          (_etype393, _size390) = iprot.readListBegin()
-          for _i394 in xrange(_size390):
-            _elem395 = ShowCompactResponseElement()
-            _elem395.read(iprot)
-            self.compacts.append(_elem395)
+          (_etype400, _size397) = iprot.readListBegin()
+          for _i401 in xrange(_size397):
+            _elem402 = ShowCompactResponseElement()
+            _elem402.read(iprot)
+            self.compacts.append(_elem402)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7656,8 +7727,8 @@ class ShowCompactResponse:
     if self.compacts is not None:
       oprot.writeFieldBegin('compacts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-      for iter396 in self.compacts:
-        iter396.write(oprot)
+      for iter403 in self.compacts:
+        iter403.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
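
The generated Python struct above wraps a single required field, colStats, a list of ColumnStatistics objects. A minimal sketch of building the same payload through the generated Java bindings, assuming they mirror the Python struct shown here; every database, table, partition and column name below is illustrative only.

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
  import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

  public class BuildSetPartitionsStatsRequest {
    public static SetPartitionsStatsRequest build() {
      // Describe which partition the statistics belong to.
      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, "default", "web_logs");
      desc.setPartName("ds=2014-08-24");

      // One long-typed column statistic (numNulls = 0, numDVs = 120).
      LongColumnStatsData longStats = new LongColumnStatsData(0L, 120L);
      longStats.setLowValue(1L);
      longStats.setHighValue(5000L);
      ColumnStatisticsObj obj =
          new ColumnStatisticsObj("hits", "bigint", ColumnStatisticsData.longStats(longStats));

      ColumnStatistics colStats = new ColumnStatistics(desc, Arrays.asList(obj));

      // The request is just the list of per-partition ColumnStatistics.
      return new SetPartitionsStatsRequest(Arrays.asList(colStats));
    }
  }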

Modified: hive/branches/spark/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb Sun Aug 24 03:43:48 2014
@@ -1028,6 +1028,23 @@ class AggrStats
   ::Thrift::Struct.generate_accessors self
 end
 
+class SetPartitionsStatsRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  COLSTATS = 1
+
+  FIELDS = {
+    COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class Schema
   include ::Thrift::Struct, ::Thrift::Struct_Union
   FIELDSCHEMAS = 1

Modified: hive/branches/spark/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/branches/spark/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Sun Aug 24 03:43:48 2014
@@ -1279,6 +1279,25 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_aggr_stats_for failed: unknown result')
     end
 
+    def set_aggr_stats_for(request)
+      send_set_aggr_stats_for(request)
+      return recv_set_aggr_stats_for()
+    end
+
+    def send_set_aggr_stats_for(request)
+      send_message('set_aggr_stats_for', Set_aggr_stats_for_args, :request => request)
+    end
+
+    def recv_set_aggr_stats_for()
+      result = receive_message(Set_aggr_stats_for_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      raise result.o4 unless result.o4.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'set_aggr_stats_for failed: unknown result')
+    end
+
     def delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
       send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
       return recv_delete_partition_column_statistics()
@@ -2907,6 +2926,23 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_aggr_stats_for', seqid)
     end
 
+    def process_set_aggr_stats_for(seqid, iprot, oprot)
+      args = read_args(iprot, Set_aggr_stats_for_args)
+      result = Set_aggr_stats_for_result.new()
+      begin
+        result.success = @handler.set_aggr_stats_for(args.request)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::InvalidObjectException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      rescue ::InvalidInputException => o4
+        result.o4 = o4
+      end
+      write_result(result, oprot, 'set_aggr_stats_for', seqid)
+    end
+
     def process_delete_partition_column_statistics(seqid, iprot, oprot)
       args = read_args(iprot, Delete_partition_column_statistics_args)
       result = Delete_partition_column_statistics_result.new()
@@ -6264,6 +6300,46 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Set_aggr_stats_for_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQUEST = 1
+
+    FIELDS = {
+      REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::SetPartitionsStatsRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Set_aggr_stats_for_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+    O4 = 4
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException},
+      O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Delete_partition_column_statistics_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1
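
The new Ruby client methods above (set_aggr_stats_for with its send/recv pair and processor) send a SetPartitionsStatsRequest and map the result fields o1 through o4 back to NoSuchObjectException, InvalidObjectException, MetaException and InvalidInputException. A sketch of the same RPC through the matching generated Java client from this commit, assuming a plain unsecured metastore connection; host and port are illustrative.

  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class SetAggrStatsRpc {
    public static boolean setAggrStats(SetPartitionsStatsRequest request) throws Exception {
      TTransport transport = new TSocket("localhost", 9083);
      transport.open();
      try {
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // Failures surface as NoSuchObjectException, InvalidObjectException,
        // MetaException or InvalidInputException, mirroring o1..o4 above.
        return client.set_aggr_stats_for(request);
      } finally {
        transport.close();
      }
    }
  }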

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java Sun Aug 24 03:43:48 2014
@@ -222,10 +222,10 @@ public class HiveAlterHandler implements
       if (success && moveData) {
         // change the file name in hdfs
         // check that src exists otherwise there is no need to copy the data
+        // rename the src to destination
         try {
-          if (srcFs.exists(srcPath)) {
-            // rename the src to destination
-            srcFs.rename(srcPath, destPath);
+          if (srcFs.exists(srcPath) && !srcFs.rename(srcPath, destPath)) {
+            throw new IOException("Renaming " + srcPath + " to " + destPath + " is failed");
           }
         } catch (IOException e) {
           boolean revertMetaDataTransaction = false;
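
The change above stops assuming that the rename succeeded: FileSystem.rename reports many failures by returning false rather than throwing, so a false return is now converted into an IOException instead of being silently ignored. A standalone sketch of that pattern against the Hadoop FileSystem API; the paths and configuration are illustrative.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class RenameWithCheck {
    public static void rename(Configuration conf, Path srcPath, Path destPath) throws IOException {
      FileSystem srcFs = srcPath.getFileSystem(conf);
      // Only attempt the rename when the source exists, and treat a false
      // return value as a hard failure.
      if (srcFs.exists(srcPath) && !srcFs.rename(srcPath, destPath)) {
        throw new IOException("Renaming " + srcPath + " to " + destPath + " failed");
      }
    }
  }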

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Sun Aug 24 03:43:48 2014
@@ -122,6 +122,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
@@ -5023,12 +5024,8 @@ public class HiveMetaStore extends Thrif
       startFunction("get_aggr_stats_for: db=" + request.getDbName() + " table=" + request.getTblName());
       AggrStats aggrStats = null;
       try {
-        //TODO: We are setting partitionCnt for which we were able to retrieve stats same as
-        // incoming number from request. This is not correct, but currently no users of this api
-        // rely on this. Only, current user StatsAnnotation don't care for it. StatsOptimizer
-        // will care for it, so before StatsOptimizer begin using it, we need to fix this.
         aggrStats = new AggrStats(getMS().get_aggr_stats_for(request.getDbName(),
-          request.getTblName(), request.getPartNames(), request.getColNames()), request.getPartNames().size());
+          request.getTblName(), request.getPartNames(), request.getColNames()));
         return aggrStats;
       } finally {
           endFunction("get_partitions_statistics_req: ", aggrStats == null, null, request.getTblName());
@@ -5036,6 +5033,17 @@ public class HiveMetaStore extends Thrif
 
     }
 
+    @Override
+    public boolean set_aggr_stats_for(SetPartitionsStatsRequest request)
+        throws NoSuchObjectException, InvalidObjectException, MetaException,
+        InvalidInputException, TException {
+      boolean ret = true;
+      for (ColumnStatistics colStats : request.getColStats()) {
+        ret = ret && update_partition_column_statistics(colStats);
+      }
+      return ret;
+    }
+
   }
 
   public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
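
With the TODO above resolved, partsFound in AggrStats now reflects how many of the requested partitions actually had statistics in the store, instead of echoing request.getPartNames().size(). A minimal caller-side sketch of using that value to decide whether the aggregate covers every requested partition; the logging is illustrative.

  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.AggrStats;
  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

  public class AggrStatsCoverage {
    /** Returns true when every requested partition contributed statistics. */
    public static boolean coversAllPartitions(AggrStats aggrStats, List<String> requestedPartNames) {
      long partsFound = aggrStats.getPartsFound();
      List<ColumnStatisticsObj> colStats = aggrStats.getColStats();
      System.out.println("aggregated " + colStats.size() + " columns over "
          + partsFound + " of " + requestedPartNames.size() + " partitions");
      return partsFound == requestedPartNames.size();
    }
  }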

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Sun Aug 24 03:43:48 2014
@@ -105,6 +105,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
@@ -1264,6 +1265,13 @@ public class HiveMetaStoreClient impleme
     InvalidInputException{
     return client.update_partition_column_statistics(statsObj);
   }
+  
+  /** {@inheritDoc} */
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+    throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+    InvalidInputException{
+    return client.set_aggr_stats_for(request);
+  }
 
   /** {@inheritDoc} */
   @Override
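
A short usage sketch of the new setPartitionColumnStatistics entry point, assuming an already configured HiveConf and a request built as in the earlier sketch; the helper class name is illustrative.

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

  public class PublishPartitionStats {
    public static boolean publish(HiveConf conf, SetPartitionsStatsRequest request) throws Exception {
      HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
      try {
        // One round trip applies every ColumnStatistics entry in the request.
        return msc.setPartitionColumnStatistics(request);
      } finally {
        msc.close();
      }
    }
  }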

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Sun Aug 24 03:43:48 2014
@@ -65,6 +65,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
@@ -1298,4 +1299,6 @@ public interface IMetaStoreClient {
 
   public AggrStats getAggrColStatsFor(String dbName, String tblName,
       List<String> colNames, List<String> partName)  throws NoSuchObjectException, MetaException, TException;
+
+  boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException;
 }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Sun Aug 24 03:43:48 2014
@@ -21,19 +21,18 @@ package org.apache.hadoop.hive.metastore
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.commons.lang.StringUtils.repeat;
 
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
 import java.sql.Connection;
 import java.sql.SQLException;
-import java.sql.Statement;
 import java.text.ParseException;
-import java.text.SimpleDateFormat;
 import java.util.ArrayList;
-import java.util.Date;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
 
 import javax.jdo.PersistenceManager;
 import javax.jdo.Query;
@@ -43,10 +42,12 @@ import javax.jdo.datastore.JDOConnection
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Decimal;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
@@ -65,9 +66,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
-import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.datanucleus.store.schema.SchemaTool;
+import org.datanucleus.store.rdbms.query.ForwardQueryResult;
 
 import com.google.common.collect.Lists;
 
@@ -97,7 +97,7 @@ class MetaStoreDirectSql {
    * Whether direct SQL can be used with the current datastore backing {@link #pm}.
    */
   private final boolean isCompatibleDatastore;
-
+  
   public MetaStoreDirectSql(PersistenceManager pm) {
     this.pm = pm;
     Transaction tx = pm.currentTransaction();
@@ -893,33 +893,247 @@ class MetaStoreDirectSql {
     return result;
   }
 
-  public List<ColumnStatisticsObj> aggrColStatsForPartitions(String dbName, String tableName,
+  public AggrStats aggrColStatsForPartitions(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
-    String qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
-      + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
-      + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
-      + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\""
-      + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("
-      + makeParams(colNames.size()) + ") AND \"PARTITION_NAME\" in ("
-      + makeParams(partNames.size()) + ") group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+    long partsFound = partsFoundForPartitions(dbName, tableName, partNames,
+        colNames);
+    List<ColumnStatisticsObj> stats = columnStatisticsObjForPartitions(dbName,
+        tableName, partNames, colNames, partsFound);
+    return new AggrStats(stats, partsFound);
+  }
 
+  private long partsFoundForPartitions(String dbName, String tableName,
+      List<String> partNames, List<String> colNames) throws MetaException {
+    long partsFound = 0;
     boolean doTrace = LOG.isDebugEnabled();
+    String qText = "select count(\"COLUMN_NAME\") from \"PART_COL_STATS\""
+        + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+        + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+        + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+        + " group by \"PARTITION_NAME\"";
     long start = doTrace ? System.nanoTime() : 0;
     Query query = pm.newQuery("javax.jdo.query.SQL", qText);
-    Object qResult = query.executeWithArray(prepareParams(dbName, tableName, partNames, colNames));
-    if (qResult == null) {
-      query.closeAll();
-      return Lists.newArrayList();
-    }
-    List<Object[]> list = ensureList(qResult);
-    List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(list.size());
-    for (Object[] row : list) {
-      colStats.add(prepareCSObj(row,0));
-    }
+    Object qResult = query.executeWithArray(prepareParams(dbName, tableName,
+        partNames, colNames));
     long end = doTrace ? System.nanoTime() : 0;
     timingTrace(doTrace, qText, start, end);
-    query.closeAll();
-    return colStats;
+    ForwardQueryResult fqr = (ForwardQueryResult) qResult;
+    List<Integer> colnumbers = new ArrayList<Integer>();
+    colnumbers.addAll(fqr);
+    for (Integer colnumber : colnumbers) {
+      if (colnumber == colNames.size())
+        partsFound++;
+    }
+    return partsFound;
+  }
+
+  private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(
+      String dbName, String tableName, List<String> partNames,
+      List<String> colNames, long partsFound) throws MetaException {
+    String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
+        + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
+        + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
+        + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\""
+        + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? ";
+    String qText = null;
+    long start = 0;
+    long end = 0;
+    Query query = null;
+    boolean doTrace = LOG.isDebugEnabled();
+    Object qResult = null;
+    ForwardQueryResult fqr = null;
+    // Check if the status of all the columns of all the partitions exists
+    // Extrapolation is not needed.
+    if (partsFound == partNames.size()) {
+      qText = commonPrefix 
+          + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+          + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+      start = doTrace ? System.nanoTime() : 0;
+      query = pm.newQuery("javax.jdo.query.SQL", qText);
+      qResult = query.executeWithArray(prepareParams(dbName, tableName,
+          partNames, colNames));      
+      if (qResult == null) {
+        query.closeAll();
+        return Lists.newArrayList();
+      }
+      end = doTrace ? System.nanoTime() : 0;
+      timingTrace(doTrace, qText, start, end);
+      List<Object[]> list = ensureList(qResult);
+      List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(
+          list.size());
+      for (Object[] row : list) {
+        colStats.add(prepareCSObj(row, 0));
+      }
+      query.closeAll();
+      return colStats;
+    } else {
+      // Extrapolation is needed for some columns.
+      // In this case, at least a column status for a partition is missing.
+      // We need to extrapolate this partition based on the other partitions
+      List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(
+          colNames.size());
+      qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") "
+          + " from \"PART_COL_STATS\""
+          + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+          + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+          + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+      start = doTrace ? System.nanoTime() : 0;
+      query = pm.newQuery("javax.jdo.query.SQL", qText);
+      qResult = query.executeWithArray(prepareParams(dbName, tableName,
+          partNames, colNames));
+      end = doTrace ? System.nanoTime() : 0;
+      timingTrace(doTrace, qText, start, end);
+      if (qResult == null) {
+        query.closeAll();
+        return Lists.newArrayList();
+      }
+      List<String> noExtraColumnNames = new ArrayList<String>();
+      Map<String, String[]> extraColumnNameTypeParts = new HashMap<String, String[]>();
+      List<Object[]> list = ensureList(qResult);
+      for (Object[] row : list) {
+        String colName = (String) row[0];
+        String colType = (String) row[1];
+        if ((Integer) row[2] == partNames.size() || (Integer) row[2] < 2) {
+          // Extrapolation is not needed for this column if
+          // count(\"PARTITION_NAME\")==partNames.size()
+          // Or, extrapolation is not possible for this column if
+          // count(\"PARTITION_NAME\")<2
+          noExtraColumnNames.add(colName);
+        } else {
+          extraColumnNameTypeParts.put(colName,
+              new String[] { colType, String.valueOf((Integer) row[2]) });
+        }
+      }
+      query.closeAll();
+      // Extrapolation is not needed for columns noExtraColumnNames
+      if (noExtraColumnNames.size() != 0) {
+        qText = commonPrefix 
+            + " and \"COLUMN_NAME\" in ("+ makeParams(noExtraColumnNames.size()) + ")"
+            + " and \"PARTITION_NAME\" in ("+ makeParams(partNames.size()) +")"
+            + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+        start = doTrace ? System.nanoTime() : 0;
+        query = pm.newQuery("javax.jdo.query.SQL", qText);
+        qResult = query.executeWithArray(prepareParams(dbName, tableName,
+            partNames, noExtraColumnNames));
+        if (qResult == null) {
+          query.closeAll();
+          return Lists.newArrayList();
+        }
+        list = ensureList(qResult);
+        for (Object[] row : list) {
+          colStats.add(prepareCSObj(row, 0));
+        }
+        end = doTrace ? System.nanoTime() : 0;
+        timingTrace(doTrace, qText, start, end);
+        query.closeAll();
+      }
+      // Extrapolation is needed for extraColumnNames.
+      // give a sequence number for all the partitions
+      if (extraColumnNameTypeParts.size() != 0) {
+        Map<String, Integer> indexMap = new HashMap<String, Integer>();
+        for (int index = 0; index < partNames.size(); index++) {
+          indexMap.put(partNames.get(index), index);
+        }
+        // get sum for all columns to reduce the number of queries
+        Map<String, Map<Integer, Object>> sumMap = new HashMap<String, Map<Integer, Object>>();
+        qText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\")"
+            + " from \"PART_COL_STATS\""
+            + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+            + " and \"COLUMN_NAME\" in (" +makeParams(extraColumnNameTypeParts.size())+ ")"
+            + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+            + " group by \"COLUMN_NAME\"";
+        start = doTrace ? System.nanoTime() : 0;
+        query = pm.newQuery("javax.jdo.query.SQL", qText);
+        List<String> extraColumnNames = new ArrayList<String>();
+        extraColumnNames.addAll(extraColumnNameTypeParts.keySet());
+        qResult = query.executeWithArray(prepareParams(dbName, tableName,
+            partNames, extraColumnNames));
+        if (qResult == null) {
+          query.closeAll();
+          return Lists.newArrayList();
+        }
+        list = ensureList(qResult);
+        // see the indexes for colstats in IExtrapolatePartStatus
+        Integer[] sumIndex = new Integer[] { 6, 10, 11 };
+        for (Object[] row : list) {
+          Map<Integer, Object> indexToObject = new HashMap<Integer, Object>();
+          for (int ind = 1; ind < row.length; ind++) {
+            indexToObject.put(sumIndex[ind - 1], row[ind]);
+          }
+          sumMap.put((String) row[0], indexToObject);
+        }
+        end = doTrace ? System.nanoTime() : 0;
+        timingTrace(doTrace, qText, start, end);
+        query.closeAll();
+        for (Map.Entry<String, String[]> entry : extraColumnNameTypeParts
+            .entrySet()) {
+          Object[] row = new Object[IExtrapolatePartStatus.colStatNames.length + 2];
+          String colName = entry.getKey();
+          String colType = entry.getValue()[0];
+          Long sumVal = Long.parseLong(entry.getValue()[1]);
+          // fill in colname
+          row[0] = colName;
+          // fill in coltype
+          row[1] = colType;
+          // use linear extrapolation. more complicated one can be added in the future.
+          IExtrapolatePartStatus extrapolateMethod = new LinearExtrapolatePartStatus();
+          // fill in colstatus
+          Integer[] index = IExtrapolatePartStatus.indexMaps.get(colType
+              .toLowerCase());
+          //if the colType is not the known type, long, double, etc, then get all index.
+          if (index == null) {
+            index = IExtrapolatePartStatus.indexMaps.get("default");
+          }
+          for (int colStatIndex : index) {
+            String colStatName = IExtrapolatePartStatus.colStatNames[colStatIndex];
+            // if the aggregation type is sum, we do a scale-up
+            if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Sum) {
+              Long val = (Long) sumMap.get(colName).get(colStatIndex);
+              if (val == null) {
+                row[2 + colStatIndex] = null;
+              } else {
+                row[2 + colStatIndex] = (Long) (val / sumVal * (partNames
+                    .size()));
+              }
+            } else {
+              // if the aggregation type is min/max, we extrapolate from the
+              // left/right borders
+              qText = "select \""
+                  + colStatName
+                  + "\",\"PARTITION_NAME\" from \"PART_COL_STATS\""
+                  + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?"
+                  + " and \"COLUMN_NAME\" in (" +makeParams(1)+ ")"
+                  + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+                  + " order by \'" + colStatName + "\'";
+              start = doTrace ? System.nanoTime() : 0;
+              query = pm.newQuery("javax.jdo.query.SQL", qText);
+              qResult = query.executeWithArray(prepareParams(dbName,
+                  tableName, partNames, Arrays.asList(colName)));
+              if (qResult == null) {
+                query.closeAll();
+                return Lists.newArrayList();
+              }
+              fqr = (ForwardQueryResult) qResult;
+              Object[] min = (Object[]) (fqr.get(0));
+              Object[] max = (Object[]) (fqr.get(fqr.size() - 1));
+              end = doTrace ? System.nanoTime() : 0;
+              timingTrace(doTrace, qText, start, end);
+              query.closeAll();
+              if (min[0] == null || max[0] == null) {
+                row[2 + colStatIndex] = null;
+              } else {
+                row[2 + colStatIndex] = extrapolateMethod.extrapolate(min, max,
+                    colStatIndex, indexMap);
+              }
+            }
+          }
+          colStats.add(prepareCSObj(row, 0));
+        }
+      }
+      return colStats;
+    }
   }
 
   private ColumnStatisticsObj prepareCSObj (Object[] row, int i) throws MetaException {
@@ -949,7 +1163,7 @@ class MetaStoreDirectSql {
 
     return params;
   }
-
+  
   public List<ColumnStatistics> getPartitionStats(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
     if (colNames.isEmpty() || partNames.isEmpty()) {

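For context on the min/max branch above: only the leftmost and rightmost partition borders are read back, and the extrapolation method projects the statistic out to the full partition range. A minimal standalone sketch of a linear projection over partition indexes follows; the method shape and the (partition index, value) inputs are illustrative assumptions, not the actual IExtrapolatePartStatus/LinearExtrapolatePartStatus contract.

// Illustrative only: linearly project a max-style statistic from two observed
// partition borders to a target partition index. The real extrapolation code
// works on Object[] rows read from PART_COL_STATS; this sketch assumes plain
// (partition index, value) pairs instead.
public final class LinearExtrapolationSketch {

  static double extrapolate(int leftIdx, double leftVal,
                            int rightIdx, double rightVal, int targetIdx) {
    if (rightIdx == leftIdx) {
      // Only one distinct border observed; nothing to project from.
      return rightVal;
    }
    double slope = (rightVal - leftVal) / (rightIdx - leftIdx);
    return leftVal + slope * (targetIdx - leftIdx);
  }

  public static void main(String[] args) {
    // Max seen on partition 0 is 10, on partition 6 is 40; project partition 9.
    System.out.println(extrapolate(0, 10.0, 6, 40.0, 9)); // prints 55.0
  }
}
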
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Sun Aug 24 03:43:48 2014
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.common.cla
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -2564,13 +2565,13 @@ public class ObjectStore implements RawS
       }
 
       // For now, only altering name, owner, parameters, cols and bucketcols is allowed
+      oldt.setDatabase(newt.getDatabase());
       oldt.setTableName(newt.getTableName().toLowerCase());
       oldt.setParameters(newt.getParameters());
       oldt.setOwner(newt.getOwner());
       // Fully copy over the contents of the new SD into the old SD,
       // so we don't create an extra SD in the metastore db that has no references.
       copyMSD(newt.getSd(), oldt.getSd());
-      oldt.setDatabase(newt.getDatabase());
       oldt.setRetention(newt.getRetention());
       oldt.setPartitionKeys(newt.getPartitionKeys());
       oldt.setTableType(newt.getTableType());
@@ -5904,25 +5905,28 @@ public class ObjectStore implements RawS
 
 
   @Override
-  public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName, String tblName,
+  public AggrStats get_aggr_stats_for(String dbName, String tblName,
       final List<String> partNames, final List<String> colNames) throws MetaException, NoSuchObjectException {
-
-    return new GetListHelper<ColumnStatisticsObj>(dbName, tblName, true, false) {
+    return new GetHelper<AggrStats>(dbName, tblName, true, false) {
       @Override
-      protected List<ColumnStatisticsObj> getSqlResult(
-          GetHelper<List<ColumnStatisticsObj>> ctx) throws MetaException {
-        return directSql.aggrColStatsForPartitions(dbName, tblName, partNames, colNames);
+      protected AggrStats getSqlResult(GetHelper<AggrStats> ctx)
+          throws MetaException {
+        return directSql.aggrColStatsForPartitions(dbName, tblName, partNames,
+            colNames);
       }
-
       @Override
-      protected List<ColumnStatisticsObj> getJdoResult(
-          GetHelper<List<ColumnStatisticsObj>> ctx) throws MetaException,
-          NoSuchObjectException {
-        // This is fast path for query optimizations, if we can find this info quickly using
+      protected AggrStats getJdoResult(GetHelper<AggrStats> ctx)
+          throws MetaException, NoSuchObjectException {
+        // This is the fast path for query optimizations: if we can find this
+        // info quickly using
         // directSql, do it. No point in falling back to the slow path here.
         throw new MetaException("Jdo path is not implemented for stats aggr.");
       }
-      }.run(true);
+      @Override
+      protected String describeResult() {
+        return null;
+      }
+    }.run(true);
   }
 
   private List<MPartitionColumnStatistics> getMPartitionColumnStatistics(

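The get_aggr_stats_for rewrite above hands the work to a helper whose direct-SQL branch is implemented and whose JDO branch deliberately throws, so there is never a slow-path retry for stats aggregation. A compact standalone sketch of that "fast path or fail" shape follows; the Fetcher type, its method names and the allowJdoFallback flag are assumptions made for this sketch, not the Hive GetHelper API.

// Illustrative template mirroring the ObjectStore change above: try the fast
// direct-SQL path and only fall back to the slow JDO path when the caller
// allows it. Fetcher and its members are assumptions for this sketch.
public class FastPathSketch {

  abstract static class Fetcher<T> {
    protected abstract T getSqlResult() throws Exception;  // direct-SQL path
    protected abstract T getJdoResult() throws Exception;  // JDO fallback

    T run(boolean allowJdoFallback) throws Exception {
      try {
        return getSqlResult();
      } catch (Exception e) {
        if (!allowJdoFallback) {
          throw e;  // e.g. stats aggregation: no point in a slow-path retry
        }
        return getJdoResult();
      }
    }
  }

  public static void main(String[] args) throws Exception {
    String result = new Fetcher<String>() {
      @Override protected String getSqlResult() { return "aggregated via direct SQL"; }
      @Override protected String getJdoResult() {
        throw new UnsupportedOperationException("Jdo path is not implemented for stats aggr.");
      }
    }.run(false);
    System.out.println(result);
  }
}
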
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Sun Aug 24 03:43:48 2014
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -548,6 +549,6 @@ public interface RawStore extends Config
    */
   public List<String> getFunctions(String dbName, String pattern) throws MetaException;
 
-  public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName, String tblName,
+  public AggrStats get_aggr_stats_for(String dbName, String tblName,
     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
 }

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java Sun Aug 24 03:43:48 2014
@@ -49,121 +49,135 @@ public class TxnDbUtil {
     // intended for creating derby databases, and thus will inexorably get
     // out of date with it.  I'm open to any suggestions on how to make this
     // read the file in a build friendly way.
-    Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    s.execute("CREATE TABLE TXNS (" +
-        "  TXN_ID bigint PRIMARY KEY," +
-        "  TXN_STATE char(1) NOT NULL," +
-        "  TXN_STARTED bigint NOT NULL," +
-        "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
-        "  TXN_USER varchar(128) NOT NULL," +
-        "  TXN_HOST varchar(128) NOT NULL)");
-
-        s.execute("CREATE TABLE TXN_COMPONENTS (" +
-        "  TC_TXNID bigint REFERENCES TXNS (TXN_ID)," +
-        "  TC_DATABASE varchar(128) NOT NULL," +
-        "  TC_TABLE varchar(128)," +
-        "  TC_PARTITION varchar(767))");
-    s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
-        "  CTC_TXNID bigint," +
-        "  CTC_DATABASE varchar(128) NOT NULL," +
-        "  CTC_TABLE varchar(128)," +
-        "  CTC_PARTITION varchar(767))");
-    s.execute("CREATE TABLE NEXT_TXN_ID (" +
-        "  NTXN_NEXT bigint NOT NULL)");
-    s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
-    s.execute("CREATE TABLE HIVE_LOCKS (" +
-        " HL_LOCK_EXT_ID bigint NOT NULL," +
-        " HL_LOCK_INT_ID bigint NOT NULL," +
-        " HL_TXNID bigint," +
-        " HL_DB varchar(128) NOT NULL," +
-        " HL_TABLE varchar(128)," +
-        " HL_PARTITION varchar(767)," +
-        " HL_LOCK_STATE char(1) NOT NULL," +
-        " HL_LOCK_TYPE char(1) NOT NULL," +
-        " HL_LAST_HEARTBEAT bigint NOT NULL," +
-        " HL_ACQUIRED_AT bigint," +
-        " HL_USER varchar(128) NOT NULL," +
-        " HL_HOST varchar(128) NOT NULL," +
-        " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
-    s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
-
-    s.execute("CREATE TABLE NEXT_LOCK_ID (" +
-        " NL_NEXT bigint NOT NULL)");
-    s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
-
-    s.execute("CREATE TABLE COMPACTION_QUEUE (" +
-        " CQ_ID bigint PRIMARY KEY," +
-        " CQ_DATABASE varchar(128) NOT NULL," +
-        " CQ_TABLE varchar(128) NOT NULL," +
-        " CQ_PARTITION varchar(767)," +
-        " CQ_STATE char(1) NOT NULL," +
-        " CQ_TYPE char(1) NOT NULL," +
-        " CQ_WORKER_ID varchar(128)," +
-        " CQ_START bigint," +
-        " CQ_RUN_AS varchar(128))");
-
-    s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
-    s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
-
-    conn.commit();
-    conn.close();
+    Connection conn = null;
+    boolean committed = false;
+    try {
+      conn = getConnection();
+      Statement s = conn.createStatement();
+      s.execute("CREATE TABLE TXNS (" +
+          "  TXN_ID bigint PRIMARY KEY," +
+          "  TXN_STATE char(1) NOT NULL," +
+          "  TXN_STARTED bigint NOT NULL," +
+          "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
+          "  TXN_USER varchar(128) NOT NULL," +
+          "  TXN_HOST varchar(128) NOT NULL)");
+
+      s.execute("CREATE TABLE TXN_COMPONENTS (" +
+      "  TC_TXNID bigint REFERENCES TXNS (TXN_ID)," +
+      "  TC_DATABASE varchar(128) NOT NULL," +
+      "  TC_TABLE varchar(128)," +
+      "  TC_PARTITION varchar(767))");
+      s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
+          "  CTC_TXNID bigint," +
+          "  CTC_DATABASE varchar(128) NOT NULL," +
+          "  CTC_TABLE varchar(128)," +
+          "  CTC_PARTITION varchar(767))");
+      s.execute("CREATE TABLE NEXT_TXN_ID (" +
+          "  NTXN_NEXT bigint NOT NULL)");
+      s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
+      s.execute("CREATE TABLE HIVE_LOCKS (" +
+          " HL_LOCK_EXT_ID bigint NOT NULL," +
+          " HL_LOCK_INT_ID bigint NOT NULL," +
+          " HL_TXNID bigint," +
+          " HL_DB varchar(128) NOT NULL," +
+          " HL_TABLE varchar(128)," +
+          " HL_PARTITION varchar(767)," +
+          " HL_LOCK_STATE char(1) NOT NULL," +
+          " HL_LOCK_TYPE char(1) NOT NULL," +
+          " HL_LAST_HEARTBEAT bigint NOT NULL," +
+          " HL_ACQUIRED_AT bigint," +
+          " HL_USER varchar(128) NOT NULL," +
+          " HL_HOST varchar(128) NOT NULL," +
+          " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+      s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+
+      s.execute("CREATE TABLE NEXT_LOCK_ID (" +
+          " NL_NEXT bigint NOT NULL)");
+      s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
+
+      s.execute("CREATE TABLE COMPACTION_QUEUE (" +
+          " CQ_ID bigint PRIMARY KEY," +
+          " CQ_DATABASE varchar(128) NOT NULL," +
+          " CQ_TABLE varchar(128) NOT NULL," +
+          " CQ_PARTITION varchar(767)," +
+          " CQ_STATE char(1) NOT NULL," +
+          " CQ_TYPE char(1) NOT NULL," +
+          " CQ_WORKER_ID varchar(128)," +
+          " CQ_START bigint," +
+          " CQ_RUN_AS varchar(128))");
+
+      s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
+      s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+
+      conn.commit();
+      committed = true;
+    } finally {
+      if (conn != null) {
+        if (!committed) conn.rollback();
+        conn.close();
+      }
+    }
   }
 
   public static void cleanDb() throws  Exception {
-    Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    // We want to try these, whether they succeed or fail.
-    try {
-      s.execute("DROP INDEX HL_TXNID_INDEX");
-    } catch (Exception e) {
-      System.err.println("Unable to drop index HL_TXNID_INDEX " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE TXN_COMPONENTS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table TXN_COMPONENTS " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE TXNS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table TXNS " +
-          e.getMessage());
-    }
+    Connection conn = null;
+    boolean committed = false;
     try {
-      s.execute("DROP TABLE NEXT_TXN_ID");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table NEXT_TXN_ID " +
-          e.getMessage());
+      conn = getConnection();
+      Statement s = conn.createStatement();
+      // We want to try these, whether they succeed or fail.
+      try {
+        s.execute("DROP INDEX HL_TXNID_INDEX");
+      } catch (Exception e) {
+        System.err.println("Unable to drop index HL_TXNID_INDEX " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE TXN_COMPONENTS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table TXN_COMPONENTS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE TXNS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table TXNS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE NEXT_TXN_ID");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table NEXT_TXN_ID " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE HIVE_LOCKS");
+      } catch (Exception e) {
+        System.err.println("Unable to drop table HIVE_LOCKS " +
+            e.getMessage());
+      }
+      try {
+        s.execute("DROP TABLE NEXT_LOCK_ID");
+      } catch (Exception e) {
+      }
+      try {
+        s.execute("DROP TABLE COMPACTION_QUEUE");
+      } catch (Exception e) {
+      }
+      try {
+        s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID");
+      } catch (Exception e) {
+      }
+      conn.commit();
+      committed = true;
+    } finally {
+      if (conn != null) {
+        if (!committed) conn.rollback();
+        conn.close();
+      }
     }
-    try {
-      s.execute("DROP TABLE HIVE_LOCKS");
-    } catch (Exception e) {
-      System.err.println("Unable to drop table HIVE_LOCKS " +
-          e.getMessage());
-    }
-    try {
-      s.execute("DROP TABLE NEXT_LOCK_ID");
-    } catch (Exception e) {
-    }
-    try {
-      s.execute("DROP TABLE COMPACTION_QUEUE");
-    } catch (Exception e) {
-    }
-    try {
-      s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID");
-    } catch (Exception e) {
-    }
-    conn.commit();
-    conn.close();
   }
 
   /**
@@ -174,25 +188,34 @@ public class TxnDbUtil {
    */
   public static int countLockComponents(long lockId) throws  Exception {
     Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    ResultSet rs = s.executeQuery("select count(*) from hive_locks where " +
-        "hl_lock_ext_id = " + lockId);
-    if (!rs.next()) return 0;
-    int rc = rs.getInt(1);
-    conn.rollback();
-    conn.close();
-    return rc;
+    try {
+      Statement s = conn.createStatement();
+      ResultSet rs = s.executeQuery("select count(*) from hive_locks where hl_lock_ext_id = " +
+          lockId);
+      if (!rs.next()) return 0;
+      int rc = rs.getInt(1);
+      return rc;
+    } finally {
+      conn.rollback();
+      conn.close();
+    }
   }
 
   public static int findNumCurrentLocks() throws Exception {
-    Connection conn = getConnection();
-    Statement s = conn.createStatement();
-    ResultSet rs = s.executeQuery("select count(*) from hive_locks");
-    if (!rs.next()) return 0;
-    int rc = rs.getInt(1);
-    conn.rollback();
-    conn.close();
-    return rc;
+    Connection conn = null;
+    try {
+      conn = getConnection();
+      Statement s = conn.createStatement();
+      ResultSet rs = s.executeQuery("select count(*) from hive_locks");
+      if (!rs.next()) return 0;
+      int rc = rs.getInt(1);
+      return rc;
+    } finally {
+      if (conn != null) {
+        conn.rollback();
+        conn.close();
+      }
+    }
   }
 
   private static Connection getConnection() throws Exception {

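The TxnDbUtil rewrites above all converge on the same shape: open the connection, do the work, commit on success, and roll back and close in a finally block so a half-created schema never leaks. A compact standalone JDBC version of that shape follows; the in-memory Derby URL and the sample DDL are assumptions made for this sketch, not TxnDbUtil's actual connection settings.

// Illustrative JDBC commit-or-rollback pattern matching the refactor above.
// The in-memory Derby URL and the sample DDL are assumptions for this sketch.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CommitOrRollbackSketch {
  public static void main(String[] args) throws Exception {
    Connection conn = null;
    boolean committed = false;
    try {
      conn = DriverManager.getConnection("jdbc:derby:memory:sketch;create=true");
      conn.setAutoCommit(false);
      Statement s = conn.createStatement();
      s.execute("CREATE TABLE SKETCH_TXNS (TXN_ID bigint PRIMARY KEY)");
      s.execute("INSERT INTO SKETCH_TXNS VALUES(1)");
      conn.commit();
      committed = true;
    } finally {
      if (conn != null) {
        if (!committed) {
          conn.rollback();   // undo any partial work before closing
        }
        conn.close();
      }
    }
  }
}
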
Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Sun Aug 24 03:43:48 2014
@@ -24,6 +24,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -712,7 +713,7 @@ public class DummyRawStoreControlledComm
   }
 
   @Override
-  public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName,
+  public AggrStats get_aggr_stats_for(String dbName,
       String tblName, List<String> partNames, List<String> colNames)
       throws MetaException {
     return null;

Modified: hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Sun Aug 24 03:43:48 2014
@@ -25,6 +25,7 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -729,7 +730,7 @@ public class DummyRawStoreForJdoConnecti
   }
 
   @Override
-  public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName,
+  public AggrStats get_aggr_stats_for(String dbName,
       String tblName, List<String> partNames, List<String> colNames)
       throws MetaException {
     return null;

Modified: hive/branches/spark/packaging/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/packaging/pom.xml?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/packaging/pom.xml (original)
+++ hive/branches/spark/packaging/pom.xml Sun Aug 24 03:43:48 2014
@@ -182,6 +182,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive.hcatalog</groupId>
       <artifactId>hive-hcatalog-streaming</artifactId>
       <version>${project.version}</version>

Modified: hive/branches/spark/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/pom.xml?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/pom.xml (original)
+++ hive/branches/spark/pom.xml Sun Aug 24 03:43:48 2014
@@ -31,6 +31,7 @@
   </prerequisites>
 
   <modules>
+    <module>accumulo-handler</module>
     <module>ant</module>
     <module>beeline</module>
     <module>cli</module>
@@ -87,6 +88,7 @@
     <maven.build-helper.plugin.version>1.8</maven.build-helper.plugin.version>
 
     <!-- Library Dependency Versions -->
+    <accumulo.version>1.6.0</accumulo.version>
     <activemq.version>5.5.0</activemq.version>
     <ant.version>1.9.1</ant.version>
     <antlr.version>3.4</antlr.version>
@@ -378,6 +380,31 @@
         <version>${commons-exec.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-core</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-fate</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-minicluster</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-start</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-trace</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.activemq</groupId>
         <artifactId>activemq-core</artifactId>
         <version>${activemq.version}</version>

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Sun Aug 24 03:43:48 2014
@@ -754,6 +754,9 @@ public class Driver implements CommandPr
         objName = privObject.getD();
         break;
       case FUNCTION:
+        if (privObject.getDatabase() != null) {
+          dbname = privObject.getDatabase().getName();
+        }
         objName = privObject.getFunctionName();
         break;
       case DUMMYPARTITION:

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java Sun Aug 24 03:43:48 2014
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryPlan;
@@ -342,9 +343,7 @@ public class ColumnStatsTask extends Tas
     // Construct a column statistics object from the result
     List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows();
     // Persist the column statistics object to the metastore
-    for (ColumnStatistics colStat : colStats) {
-      db.updatePartitionColumnStatistics(colStat);
-    }
+    db.setPartitionColumnStatistics(new SetPartitionsStatsRequest(colStats));
     return 0;
   }
 

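The ColumnStatsTask change above replaces one metastore round trip per partition with a single batched request. A minimal sketch of assembling that request follows; the toRequest helper is hypothetical, and the only metastore API assumed is the SetPartitionsStatsRequest(List<ColumnStatistics>) constructor already used in the diff.

// Collect per-partition column statistics and publish them in one batched
// request, following the ColumnStatsTask change above. toRequest is a
// hypothetical helper introduced for this sketch.
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

public class BatchedStatsSketch {

  static SetPartitionsStatsRequest toRequest(List<ColumnStatistics> perPartitionStats) {
    // Defensive copy so later mutation of the caller's list cannot change the request.
    return new SetPartitionsStatsRequest(new ArrayList<ColumnStatistics>(perPartitionStats));
  }
}
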
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Sun Aug 24 03:43:48 2014
@@ -105,7 +105,7 @@ public class FetchOperator implements Se
   private transient JobConf job;
   private transient WritableComparable key;
   private transient Writable value;
-  private transient Writable[] vcValues;
+  private transient Object[] vcValues;
   private transient Deserializer serde;
   private transient Deserializer tblSerde;
   private transient Converter partTblObjectInspectorConverter;
@@ -141,12 +141,11 @@ public class FetchOperator implements Se
       List<String> names = new ArrayList<String>(vcCols.size());
       List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>(vcCols.size());
       for (VirtualColumn vc : vcCols) {
-        inspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-                vc.getTypeInfo()));
+        inspectors.add(vc.getObjectInspector());
         names.add(vc.getName());
       }
       vcsOI = ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);
-      vcValues = new Writable[vcCols.size()];
+      vcValues = new Object[vcCols.size()];
     }
     isPartitioned = work.isPartitioned();
     tblDataDone = false;

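The FetchOperator change above swaps the Writable[] container for Object[], which the standard struct ObjectInspector can already walk; that is what lets a struct-valued virtual column such as ROW__ID sit alongside primitive ones. A small standalone sketch of that pairing follows; the two column names and the sample values are illustrative, and the java-object primitive inspectors are chosen here only for the sake of the example.

// Sketch of the container change above: with a standard struct ObjectInspector
// the virtual-column row can be a plain Object[] of java objects, not Writables.
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class VirtualColumnOISketch {
  public static void main(String[] args) {
    List<String> names = Arrays.asList("BLOCK__OFFSET__INSIDE__FILE", "INPUT__FILE__NAME");
    List<ObjectInspector> inspectors = Arrays.<ObjectInspector>asList(
        PrimitiveObjectInspectorFactory.javaLongObjectInspector,
        PrimitiveObjectInspectorFactory.javaStringObjectInspector);
    StructObjectInspector vcsOI =
        ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);

    Object[] vcValues = new Object[] { 128L, "hdfs://nn/warehouse/t/000000_0" };
    // The standard struct inspector accepts an Object[] directly as struct data.
    System.out.println(vcsOI.getStructFieldsDataAsList(vcValues));
  }
}
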
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Sun Aug 24 03:43:48 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.io.IOContext;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -140,7 +141,7 @@ public class MapOperator extends Operato
     String tableName;
     String partName;
     List<VirtualColumn> vcs;
-    Writable[] vcValues;
+    Object[] vcValues;
 
     private boolean isPartitioned() {
       return partObjectInspector != null;
@@ -165,7 +166,7 @@ public class MapOperator extends Operato
    * op.
    *
    * @param hconf
-   * @param mrwork
+   * @param mapWork
    * @throws HiveException
    */
   public void initializeAsRoot(Configuration hconf, MapWork mapWork)
@@ -250,13 +251,13 @@ public class MapOperator extends Operato
 
     // The op may not be a TableScan for mapjoins
     // Consider the query: select /*+MAPJOIN(a)*/ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-    // In that case, it will be a Select, but the rowOI need not be ammended
+    // In that case, it will be a Select, but the rowOI need not be amended
     if (ctx.op instanceof TableScanOperator) {
       TableScanOperator tsOp = (TableScanOperator) ctx.op;
       TableScanDesc tsDesc = tsOp.getConf();
       if (tsDesc != null && tsDesc.hasVirtualCols()) {
         opCtx.vcs = tsDesc.getVirtualCols();
-        opCtx.vcValues = new Writable[opCtx.vcs.size()];
+        opCtx.vcValues = new Object[opCtx.vcs.size()];
         opCtx.vcsObjectInspector = VirtualColumn.getVCSObjectInspector(opCtx.vcs);
         if (opCtx.isPartitioned()) {
           opCtx.rowWithPartAndVC = Arrays.copyOfRange(opCtx.rowWithPart, 0, 3);
@@ -550,13 +551,13 @@ public class MapOperator extends Operato
     }
   }
 
-  public static Writable[] populateVirtualColumnValues(ExecMapperContext ctx,
-      List<VirtualColumn> vcs, Writable[] vcValues, Deserializer deserializer) {
+  public static Object[] populateVirtualColumnValues(ExecMapperContext ctx,
+      List<VirtualColumn> vcs, Object[] vcValues, Deserializer deserializer) {
     if (vcs == null) {
       return vcValues;
     }
     if (vcValues == null) {
-      vcValues = new Writable[vcs.size()];
+      vcValues = new Object[vcs.size()];
     }
     for (int i = 0; i < vcs.size(); i++) {
       VirtualColumn vc = vcs.get(i);
@@ -602,6 +603,19 @@ public class MapOperator extends Operato
           old.set(current);
         }
       }
+      else if (vc.equals(VirtualColumn.ROWID)) {
+        if (ctx.getIoCxt().ri == null) {
+          vcValues[i] = null;
+        } else {
+          if (vcValues[i] == null) {
+            vcValues[i] = new Object[RecordIdentifier.Field.values().length];
+          }
+          RecordIdentifier.StructInfo.toArray(ctx.getIoCxt().ri, (Object[]) vcValues[i]);
+          // Clear ri so we don't accidentally cache the value; this shouldn't happen since
+          // the IO layer either knows how to produce ROW__ID or not, but be safe.
+          ctx.getIoCxt().ri = null;
+        }
+      }
     }
     return vcValues;
   }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java Sun Aug 24 03:43:48 2014
@@ -155,7 +155,7 @@ public class ExecMapper extends MapReduc
       }
     }
   }
-
+  @Override
   public void map(Object key, Object value, OutputCollector output,
       Reporter reporter) throws IOException {
     if (oc == null) {

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java Sun Aug 24 03:43:48 2014
@@ -22,6 +22,7 @@ import java.io.Serializable;
 import java.net.URI;
 import java.util.Map;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -276,11 +277,13 @@ public class Entity implements Serializa
     }
 
     if (typ == Type.TABLE) {
-      return t.getDataLocation().toUri();
+      Path path = t.getDataLocation();
+      return path == null ? null : path.toUri();
     }
 
     if (typ == Type.PARTITION) {
-      return p.getDataLocation().toUri();
+      Path path = p.getDataLocation();
+      return path == null ? null : path.toUri();
     }
 
     if (typ == Type.DFS_DIR || typ == Type.LOCAL_DIR) {
@@ -333,6 +336,9 @@ public class Entity implements Serializa
     case DUMMYPARTITION:
       return p.getName();
     case FUNCTION:
+      if (database != null) {
+        return database.getName() + "." + stringObject;
+      }
       return stringObject;
     default:
       return d;