Posted to commits@hive.apache.org by jx...@apache.org on 2015/11/06 18:32:43 UTC

[01/55] [abbrv] hive git commit: HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)

Repository: hive
Updated Branches:
  refs/heads/master-fixed [created] e8076ef41
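
For context, this patch adds a real exchange_partitions call to the metastore
Thrift API: unlike the existing exchange_partition, it returns the full list of
exchanged Partition objects, which is what lets the pre/post execute hooks see
the outputs. A minimal usage sketch against the generated Python client follows;
the host, port, and table names are illustrative placeholders, not part of the
patch (note the generated code targets Python 2 -- xrange, iteritems):

  from thrift.transport import TSocket, TTransport
  from thrift.protocol import TBinaryProtocol
  from hive_metastore import ThriftHiveMetastore

  # Plain (unsecured) connection; 9083 is the usual metastore port.
  transport = TTransport.TBufferedTransport(TSocket.TSocket('metastore-host', 9083))
  client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
  transport.open()

  # Move partition ds=2015-11-06 from db1.src to db1.dst. The older
  # exchange_partition call returned a single (empty) Partition; the new
  # plural call returns every partition that was exchanged.
  parts = client.exchange_partitions({'ds': '2015-11-06'},
                                     'db1', 'src', 'db1', 'dst')
  for p in parts:
      print p.dbName, p.tableName, p.values

  transport.close()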


http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 59c7b94..9873810 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -387,6 +387,17 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
+  def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+    """
+    Parameters:
+     - partitionSpecs
+     - source_db
+     - source_table_name
+     - dest_db
+     - dest_table_name
+    """
+    pass
+
   def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
     """
     Parameters:
@@ -2728,6 +2739,53 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o4
     raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result")
 
+  def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+    """
+    Parameters:
+     - partitionSpecs
+     - source_db
+     - source_table_name
+     - dest_db
+     - dest_table_name
+    """
+    self.send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+    return self.recv_exchange_partitions()
+
+  def send_exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+    self._oprot.writeMessageBegin('exchange_partitions', TMessageType.CALL, self._seqid)
+    args = exchange_partitions_args()
+    args.partitionSpecs = partitionSpecs
+    args.source_db = source_db
+    args.source_table_name = source_table_name
+    args.dest_db = dest_db
+    args.dest_table_name = dest_table_name
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_exchange_partitions(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = exchange_partitions_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    if result.o3 is not None:
+      raise result.o3
+    if result.o4 is not None:
+      raise result.o4
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result")
+
   def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
     """
     Parameters:
@@ -5817,6 +5875,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["drop_partitions_req"] = Processor.process_drop_partitions_req
     self._processMap["get_partition"] = Processor.process_get_partition
     self._processMap["exchange_partition"] = Processor.process_exchange_partition
+    self._processMap["exchange_partitions"] = Processor.process_exchange_partitions
     self._processMap["get_partition_with_auth"] = Processor.process_get_partition_with_auth
     self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name
     self._processMap["get_partitions"] = Processor.process_get_partitions
@@ -7069,6 +7128,37 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_exchange_partitions(self, seqid, iprot, oprot):
+    args = exchange_partitions_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = exchange_partitions_result()
+    try:
+      result.success = self._handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except MetaException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except NoSuchObjectException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except InvalidObjectException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except InvalidInputException as o4:
+      msg_type = TMessageType.REPLY
+      result.o4 = o4
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("exchange_partitions", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_get_partition_with_auth(self, seqid, iprot, oprot):
     args = get_partition_with_auth_args()
     args.read(iprot)
@@ -17089,6 +17179,262 @@ class exchange_partition_result:
   def __ne__(self, other):
     return not (self == other)
 
+class exchange_partitions_args:
+  """
+  Attributes:
+   - partitionSpecs
+   - source_db
+   - source_table_name
+   - dest_db
+   - dest_table_name
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.MAP, 'partitionSpecs', (TType.STRING,None,TType.STRING,None), None, ), # 1
+    (2, TType.STRING, 'source_db', None, None, ), # 2
+    (3, TType.STRING, 'source_table_name', None, None, ), # 3
+    (4, TType.STRING, 'dest_db', None, None, ), # 4
+    (5, TType.STRING, 'dest_table_name', None, None, ), # 5
+  )
+
+  def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None,):
+    self.partitionSpecs = partitionSpecs
+    self.source_db = source_db
+    self.source_table_name = source_table_name
+    self.dest_db = dest_db
+    self.dest_table_name = dest_table_name
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.MAP:
+          self.partitionSpecs = {}
+          (_ktype667, _vtype668, _size666 ) = iprot.readMapBegin()
+          for _i670 in xrange(_size666):
+            _key671 = iprot.readString()
+            _val672 = iprot.readString()
+            self.partitionSpecs[_key671] = _val672
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.source_db = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.source_table_name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.dest_db = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRING:
+          self.dest_table_name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('exchange_partitions_args')
+    if self.partitionSpecs is not None:
+      oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
+      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
+      for kiter673,viter674 in self.partitionSpecs.items():
+        oprot.writeString(kiter673)
+        oprot.writeString(viter674)
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
+    if self.source_db is not None:
+      oprot.writeFieldBegin('source_db', TType.STRING, 2)
+      oprot.writeString(self.source_db)
+      oprot.writeFieldEnd()
+    if self.source_table_name is not None:
+      oprot.writeFieldBegin('source_table_name', TType.STRING, 3)
+      oprot.writeString(self.source_table_name)
+      oprot.writeFieldEnd()
+    if self.dest_db is not None:
+      oprot.writeFieldBegin('dest_db', TType.STRING, 4)
+      oprot.writeString(self.dest_db)
+      oprot.writeFieldEnd()
+    if self.dest_table_name is not None:
+      oprot.writeFieldBegin('dest_table_name', TType.STRING, 5)
+      oprot.writeString(self.dest_table_name)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.partitionSpecs)
+    value = (value * 31) ^ hash(self.source_db)
+    value = (value * 31) ^ hash(self.source_table_name)
+    value = (value * 31) ^ hash(self.dest_db)
+    value = (value * 31) ^ hash(self.dest_table_name)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class exchange_partitions_result:
+  """
+  Attributes:
+   - success
+   - o1
+   - o2
+   - o3
+   - o4
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+    (3, TType.STRUCT, 'o3', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3
+    (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4
+  )
+
+  def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+    self.o3 = o3
+    self.o4 = o4
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype678, _size675) = iprot.readListBegin()
+          for _i679 in xrange(_size675):
+            _elem680 = Partition()
+            _elem680.read(iprot)
+            self.success.append(_elem680)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = NoSuchObjectException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRUCT:
+          self.o3 = InvalidObjectException()
+          self.o3.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRUCT:
+          self.o4 = InvalidInputException()
+          self.o4.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('exchange_partitions_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter681 in self.success:
+        iter681.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 is not None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o3 is not None:
+      oprot.writeFieldBegin('o3', TType.STRUCT, 3)
+      self.o3.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o4 is not None:
+      oprot.writeFieldBegin('o4', TType.STRUCT, 4)
+      self.o4.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    value = (value * 31) ^ hash(self.o2)
+    value = (value * 31) ^ hash(self.o3)
+    value = (value * 31) ^ hash(self.o4)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class get_partition_with_auth_args:
   """
   Attributes:
@@ -17137,10 +17483,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype669, _size666) = iprot.readListBegin()
-          for _i670 in xrange(_size666):
-            _elem671 = iprot.readString()
-            self.part_vals.append(_elem671)
+          (_etype685, _size682) = iprot.readListBegin()
+          for _i686 in xrange(_size682):
+            _elem687 = iprot.readString()
+            self.part_vals.append(_elem687)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17152,10 +17498,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype675, _size672) = iprot.readListBegin()
-          for _i676 in xrange(_size672):
-            _elem677 = iprot.readString()
-            self.group_names.append(_elem677)
+          (_etype691, _size688) = iprot.readListBegin()
+          for _i692 in xrange(_size688):
+            _elem693 = iprot.readString()
+            self.group_names.append(_elem693)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17180,8 +17526,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter678 in self.part_vals:
-        oprot.writeString(iter678)
+      for iter694 in self.part_vals:
+        oprot.writeString(iter694)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -17191,8 +17537,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter679 in self.group_names:
-        oprot.writeString(iter679)
+      for iter695 in self.group_names:
+        oprot.writeString(iter695)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17621,11 +17967,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype683, _size680) = iprot.readListBegin()
-          for _i684 in xrange(_size680):
-            _elem685 = Partition()
-            _elem685.read(iprot)
-            self.success.append(_elem685)
+          (_etype699, _size696) = iprot.readListBegin()
+          for _i700 in xrange(_size696):
+            _elem701 = Partition()
+            _elem701.read(iprot)
+            self.success.append(_elem701)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17654,8 +18000,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter686 in self.success:
-        iter686.write(oprot)
+      for iter702 in self.success:
+        iter702.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17749,10 +18095,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype690, _size687) = iprot.readListBegin()
-          for _i691 in xrange(_size687):
-            _elem692 = iprot.readString()
-            self.group_names.append(_elem692)
+          (_etype706, _size703) = iprot.readListBegin()
+          for _i707 in xrange(_size703):
+            _elem708 = iprot.readString()
+            self.group_names.append(_elem708)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17785,8 +18131,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter693 in self.group_names:
-        oprot.writeString(iter693)
+      for iter709 in self.group_names:
+        oprot.writeString(iter709)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17847,11 +18193,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype697, _size694) = iprot.readListBegin()
-          for _i698 in xrange(_size694):
-            _elem699 = Partition()
-            _elem699.read(iprot)
-            self.success.append(_elem699)
+          (_etype713, _size710) = iprot.readListBegin()
+          for _i714 in xrange(_size710):
+            _elem715 = Partition()
+            _elem715.read(iprot)
+            self.success.append(_elem715)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17880,8 +18226,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter700 in self.success:
-        iter700.write(oprot)
+      for iter716 in self.success:
+        iter716.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18039,11 +18385,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype704, _size701) = iprot.readListBegin()
-          for _i705 in xrange(_size701):
-            _elem706 = PartitionSpec()
-            _elem706.read(iprot)
-            self.success.append(_elem706)
+          (_etype720, _size717) = iprot.readListBegin()
+          for _i721 in xrange(_size717):
+            _elem722 = PartitionSpec()
+            _elem722.read(iprot)
+            self.success.append(_elem722)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18072,8 +18418,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter707 in self.success:
-        iter707.write(oprot)
+      for iter723 in self.success:
+        iter723.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18228,10 +18574,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype711, _size708) = iprot.readListBegin()
-          for _i712 in xrange(_size708):
-            _elem713 = iprot.readString()
-            self.success.append(_elem713)
+          (_etype727, _size724) = iprot.readListBegin()
+          for _i728 in xrange(_size724):
+            _elem729 = iprot.readString()
+            self.success.append(_elem729)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18254,8 +18600,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter714 in self.success:
-        oprot.writeString(iter714)
+      for iter730 in self.success:
+        oprot.writeString(iter730)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -18331,10 +18677,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype718, _size715) = iprot.readListBegin()
-          for _i719 in xrange(_size715):
-            _elem720 = iprot.readString()
-            self.part_vals.append(_elem720)
+          (_etype734, _size731) = iprot.readListBegin()
+          for _i735 in xrange(_size731):
+            _elem736 = iprot.readString()
+            self.part_vals.append(_elem736)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18364,8 +18710,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter721 in self.part_vals:
-        oprot.writeString(iter721)
+      for iter737 in self.part_vals:
+        oprot.writeString(iter737)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -18429,11 +18775,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype725, _size722) = iprot.readListBegin()
-          for _i726 in xrange(_size722):
-            _elem727 = Partition()
-            _elem727.read(iprot)
-            self.success.append(_elem727)
+          (_etype741, _size738) = iprot.readListBegin()
+          for _i742 in xrange(_size738):
+            _elem743 = Partition()
+            _elem743.read(iprot)
+            self.success.append(_elem743)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18462,8 +18808,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter728 in self.success:
-        iter728.write(oprot)
+      for iter744 in self.success:
+        iter744.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18550,10 +18896,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype732, _size729) = iprot.readListBegin()
-          for _i733 in xrange(_size729):
-            _elem734 = iprot.readString()
-            self.part_vals.append(_elem734)
+          (_etype748, _size745) = iprot.readListBegin()
+          for _i749 in xrange(_size745):
+            _elem750 = iprot.readString()
+            self.part_vals.append(_elem750)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18570,10 +18916,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype738, _size735) = iprot.readListBegin()
-          for _i739 in xrange(_size735):
-            _elem740 = iprot.readString()
-            self.group_names.append(_elem740)
+          (_etype754, _size751) = iprot.readListBegin()
+          for _i755 in xrange(_size751):
+            _elem756 = iprot.readString()
+            self.group_names.append(_elem756)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18598,8 +18944,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter741 in self.part_vals:
-        oprot.writeString(iter741)
+      for iter757 in self.part_vals:
+        oprot.writeString(iter757)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -18613,8 +18959,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter742 in self.group_names:
-        oprot.writeString(iter742)
+      for iter758 in self.group_names:
+        oprot.writeString(iter758)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18676,11 +19022,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype746, _size743) = iprot.readListBegin()
-          for _i747 in xrange(_size743):
-            _elem748 = Partition()
-            _elem748.read(iprot)
-            self.success.append(_elem748)
+          (_etype762, _size759) = iprot.readListBegin()
+          for _i763 in xrange(_size759):
+            _elem764 = Partition()
+            _elem764.read(iprot)
+            self.success.append(_elem764)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18709,8 +19055,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter749 in self.success:
-        iter749.write(oprot)
+      for iter765 in self.success:
+        iter765.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18791,10 +19137,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype753, _size750) = iprot.readListBegin()
-          for _i754 in xrange(_size750):
-            _elem755 = iprot.readString()
-            self.part_vals.append(_elem755)
+          (_etype769, _size766) = iprot.readListBegin()
+          for _i770 in xrange(_size766):
+            _elem771 = iprot.readString()
+            self.part_vals.append(_elem771)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18824,8 +19170,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter756 in self.part_vals:
-        oprot.writeString(iter756)
+      for iter772 in self.part_vals:
+        oprot.writeString(iter772)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -18889,10 +19235,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype760, _size757) = iprot.readListBegin()
-          for _i761 in xrange(_size757):
-            _elem762 = iprot.readString()
-            self.success.append(_elem762)
+          (_etype776, _size773) = iprot.readListBegin()
+          for _i777 in xrange(_size773):
+            _elem778 = iprot.readString()
+            self.success.append(_elem778)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18921,8 +19267,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter763 in self.success:
-        oprot.writeString(iter763)
+      for iter779 in self.success:
+        oprot.writeString(iter779)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19093,11 +19439,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype767, _size764) = iprot.readListBegin()
-          for _i768 in xrange(_size764):
-            _elem769 = Partition()
-            _elem769.read(iprot)
-            self.success.append(_elem769)
+          (_etype783, _size780) = iprot.readListBegin()
+          for _i784 in xrange(_size780):
+            _elem785 = Partition()
+            _elem785.read(iprot)
+            self.success.append(_elem785)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19126,8 +19472,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter770 in self.success:
-        iter770.write(oprot)
+      for iter786 in self.success:
+        iter786.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19298,11 +19644,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype774, _size771) = iprot.readListBegin()
-          for _i775 in xrange(_size771):
-            _elem776 = PartitionSpec()
-            _elem776.read(iprot)
-            self.success.append(_elem776)
+          (_etype790, _size787) = iprot.readListBegin()
+          for _i791 in xrange(_size787):
+            _elem792 = PartitionSpec()
+            _elem792.read(iprot)
+            self.success.append(_elem792)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19331,8 +19677,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter777 in self.success:
-        iter777.write(oprot)
+      for iter793 in self.success:
+        iter793.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19569,10 +19915,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype781, _size778) = iprot.readListBegin()
-          for _i782 in xrange(_size778):
-            _elem783 = iprot.readString()
-            self.names.append(_elem783)
+          (_etype797, _size794) = iprot.readListBegin()
+          for _i798 in xrange(_size794):
+            _elem799 = iprot.readString()
+            self.names.append(_elem799)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19597,8 +19943,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter784 in self.names:
-        oprot.writeString(iter784)
+      for iter800 in self.names:
+        oprot.writeString(iter800)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19657,11 +20003,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype788, _size785) = iprot.readListBegin()
-          for _i789 in xrange(_size785):
-            _elem790 = Partition()
-            _elem790.read(iprot)
-            self.success.append(_elem790)
+          (_etype804, _size801) = iprot.readListBegin()
+          for _i805 in xrange(_size801):
+            _elem806 = Partition()
+            _elem806.read(iprot)
+            self.success.append(_elem806)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19690,8 +20036,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter791 in self.success:
-        iter791.write(oprot)
+      for iter807 in self.success:
+        iter807.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19941,11 +20287,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype795, _size792) = iprot.readListBegin()
-          for _i796 in xrange(_size792):
-            _elem797 = Partition()
-            _elem797.read(iprot)
-            self.new_parts.append(_elem797)
+          (_etype811, _size808) = iprot.readListBegin()
+          for _i812 in xrange(_size808):
+            _elem813 = Partition()
+            _elem813.read(iprot)
+            self.new_parts.append(_elem813)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19970,8 +20316,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter798 in self.new_parts:
-        iter798.write(oprot)
+      for iter814 in self.new_parts:
+        iter814.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20310,10 +20656,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype802, _size799) = iprot.readListBegin()
-          for _i803 in xrange(_size799):
-            _elem804 = iprot.readString()
-            self.part_vals.append(_elem804)
+          (_etype818, _size815) = iprot.readListBegin()
+          for _i819 in xrange(_size815):
+            _elem820 = iprot.readString()
+            self.part_vals.append(_elem820)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20344,8 +20690,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter805 in self.part_vals:
-        oprot.writeString(iter805)
+      for iter821 in self.part_vals:
+        oprot.writeString(iter821)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -20487,10 +20833,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype809, _size806) = iprot.readListBegin()
-          for _i810 in xrange(_size806):
-            _elem811 = iprot.readString()
-            self.part_vals.append(_elem811)
+          (_etype825, _size822) = iprot.readListBegin()
+          for _i826 in xrange(_size822):
+            _elem827 = iprot.readString()
+            self.part_vals.append(_elem827)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20512,8 +20858,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter812 in self.part_vals:
-        oprot.writeString(iter812)
+      for iter828 in self.part_vals:
+        oprot.writeString(iter828)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -20871,10 +21217,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype816, _size813) = iprot.readListBegin()
-          for _i817 in xrange(_size813):
-            _elem818 = iprot.readString()
-            self.success.append(_elem818)
+          (_etype832, _size829) = iprot.readListBegin()
+          for _i833 in xrange(_size829):
+            _elem834 = iprot.readString()
+            self.success.append(_elem834)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20897,8 +21243,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter819 in self.success:
-        oprot.writeString(iter819)
+      for iter835 in self.success:
+        oprot.writeString(iter835)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21022,11 +21368,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype821, _vtype822, _size820 ) = iprot.readMapBegin()
-          for _i824 in xrange(_size820):
-            _key825 = iprot.readString()
-            _val826 = iprot.readString()
-            self.success[_key825] = _val826
+          (_ktype837, _vtype838, _size836 ) = iprot.readMapBegin()
+          for _i840 in xrange(_size836):
+            _key841 = iprot.readString()
+            _val842 = iprot.readString()
+            self.success[_key841] = _val842
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -21049,9 +21395,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter827,viter828 in self.success.items():
-        oprot.writeString(kiter827)
-        oprot.writeString(viter828)
+      for kiter843,viter844 in self.success.items():
+        oprot.writeString(kiter843)
+        oprot.writeString(viter844)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21127,11 +21473,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype830, _vtype831, _size829 ) = iprot.readMapBegin()
-          for _i833 in xrange(_size829):
-            _key834 = iprot.readString()
-            _val835 = iprot.readString()
-            self.part_vals[_key834] = _val835
+          (_ktype846, _vtype847, _size845 ) = iprot.readMapBegin()
+          for _i849 in xrange(_size845):
+            _key850 = iprot.readString()
+            _val851 = iprot.readString()
+            self.part_vals[_key850] = _val851
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -21161,9 +21507,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter836,viter837 in self.part_vals.items():
-        oprot.writeString(kiter836)
-        oprot.writeString(viter837)
+      for kiter852,viter853 in self.part_vals.items():
+        oprot.writeString(kiter852)
+        oprot.writeString(viter853)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -21377,11 +21723,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype839, _vtype840, _size838 ) = iprot.readMapBegin()
-          for _i842 in xrange(_size838):
-            _key843 = iprot.readString()
-            _val844 = iprot.readString()
-            self.part_vals[_key843] = _val844
+          (_ktype855, _vtype856, _size854 ) = iprot.readMapBegin()
+          for _i858 in xrange(_size854):
+            _key859 = iprot.readString()
+            _val860 = iprot.readString()
+            self.part_vals[_key859] = _val860
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -21411,9 +21757,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter845,viter846 in self.part_vals.items():
-        oprot.writeString(kiter845)
-        oprot.writeString(viter846)
+      for kiter861,viter862 in self.part_vals.items():
+        oprot.writeString(kiter861)
+        oprot.writeString(viter862)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -22468,11 +22814,11 @@ class get_indexes_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype850, _size847) = iprot.readListBegin()
-          for _i851 in xrange(_size847):
-            _elem852 = Index()
-            _elem852.read(iprot)
-            self.success.append(_elem852)
+          (_etype866, _size863) = iprot.readListBegin()
+          for _i867 in xrange(_size863):
+            _elem868 = Index()
+            _elem868.read(iprot)
+            self.success.append(_elem868)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22501,8 +22847,8 @@ class get_indexes_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter853 in self.success:
-        iter853.write(oprot)
+      for iter869 in self.success:
+        iter869.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22657,10 +23003,10 @@ class get_index_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype857, _size854) = iprot.readListBegin()
-          for _i858 in xrange(_size854):
-            _elem859 = iprot.readString()
-            self.success.append(_elem859)
+          (_etype873, _size870) = iprot.readListBegin()
+          for _i874 in xrange(_size870):
+            _elem875 = iprot.readString()
+            self.success.append(_elem875)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22683,8 +23029,8 @@ class get_index_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter860 in self.success:
-        oprot.writeString(iter860)
+      for iter876 in self.success:
+        oprot.writeString(iter876)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -25232,10 +25578,10 @@ class get_functions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype864, _size861) = iprot.readListBegin()
-          for _i865 in xrange(_size861):
-            _elem866 = iprot.readString()
-            self.success.append(_elem866)
+          (_etype880, _size877) = iprot.readListBegin()
+          for _i881 in xrange(_size877):
+            _elem882 = iprot.readString()
+            self.success.append(_elem882)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25258,8 +25604,8 @@ class get_functions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter867 in self.success:
-        oprot.writeString(iter867)
+      for iter883 in self.success:
+        oprot.writeString(iter883)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -25947,10 +26293,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype871, _size868) = iprot.readListBegin()
-          for _i872 in xrange(_size868):
-            _elem873 = iprot.readString()
-            self.success.append(_elem873)
+          (_etype887, _size884) = iprot.readListBegin()
+          for _i888 in xrange(_size884):
+            _elem889 = iprot.readString()
+            self.success.append(_elem889)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25973,8 +26319,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter874 in self.success:
-        oprot.writeString(iter874)
+      for iter890 in self.success:
+        oprot.writeString(iter890)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26488,11 +26834,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype878, _size875) = iprot.readListBegin()
-          for _i879 in xrange(_size875):
-            _elem880 = Role()
-            _elem880.read(iprot)
-            self.success.append(_elem880)
+          (_etype894, _size891) = iprot.readListBegin()
+          for _i895 in xrange(_size891):
+            _elem896 = Role()
+            _elem896.read(iprot)
+            self.success.append(_elem896)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26515,8 +26861,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter881 in self.success:
-        iter881.write(oprot)
+      for iter897 in self.success:
+        iter897.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27025,10 +27371,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype885, _size882) = iprot.readListBegin()
-          for _i886 in xrange(_size882):
-            _elem887 = iprot.readString()
-            self.group_names.append(_elem887)
+          (_etype901, _size898) = iprot.readListBegin()
+          for _i902 in xrange(_size898):
+            _elem903 = iprot.readString()
+            self.group_names.append(_elem903)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27053,8 +27399,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter888 in self.group_names:
-        oprot.writeString(iter888)
+      for iter904 in self.group_names:
+        oprot.writeString(iter904)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -27281,11 +27627,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype892, _size889) = iprot.readListBegin()
-          for _i893 in xrange(_size889):
-            _elem894 = HiveObjectPrivilege()
-            _elem894.read(iprot)
-            self.success.append(_elem894)
+          (_etype908, _size905) = iprot.readListBegin()
+          for _i909 in xrange(_size905):
+            _elem910 = HiveObjectPrivilege()
+            _elem910.read(iprot)
+            self.success.append(_elem910)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27308,8 +27654,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter895 in self.success:
-        iter895.write(oprot)
+      for iter911 in self.success:
+        iter911.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27807,10 +28153,10 @@ class set_ugi_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype899, _size896) = iprot.readListBegin()
-          for _i900 in xrange(_size896):
-            _elem901 = iprot.readString()
-            self.group_names.append(_elem901)
+          (_etype915, _size912) = iprot.readListBegin()
+          for _i916 in xrange(_size912):
+            _elem917 = iprot.readString()
+            self.group_names.append(_elem917)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27831,8 +28177,8 @@ class set_ugi_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter902 in self.group_names:
-        oprot.writeString(iter902)
+      for iter918 in self.group_names:
+        oprot.writeString(iter918)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -27887,10 +28233,10 @@ class set_ugi_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype906, _size903) = iprot.readListBegin()
-          for _i907 in xrange(_size903):
-            _elem908 = iprot.readString()
-            self.success.append(_elem908)
+          (_etype922, _size919) = iprot.readListBegin()
+          for _i923 in xrange(_size919):
+            _elem924 = iprot.readString()
+            self.success.append(_elem924)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27913,8 +28259,8 @@ class set_ugi_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter909 in self.success:
-        oprot.writeString(iter909)
+      for iter925 in self.success:
+        oprot.writeString(iter925)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:

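The generated recv_exchange_partitions above maps the four declared exceptions
back onto typed Python exceptions (o1 through o4) and raises a
TApplicationException with MISSING_RESULT if the server returned nothing. A
hedged handling sketch, assuming the open `client` from the earlier example:

  from hive_metastore.ttypes import (MetaException, NoSuchObjectException,
                                     InvalidObjectException, InvalidInputException)

  try:
      parts = client.exchange_partitions({'ds': '2015-11-06'},
                                         'db1', 'src', 'db1', 'dst')
  except NoSuchObjectException as e:
      print 'missing table or partition:', e.message   # o2
  except (InvalidObjectException, InvalidInputException) as e:
      print 'bad partition spec:', e.message           # o3 / o4
  except MetaException as e:
      print 'metastore-side failure:', e.message       # o1
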
http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 7b93158..c613e4b 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -766,6 +766,25 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partition failed: unknown result')
     end
 
+    def exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+      send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+      return recv_exchange_partitions()
+    end
+
+    def send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+      send_message('exchange_partitions', Exchange_partitions_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name)
+    end
+
+    def recv_exchange_partitions()
+      result = receive_message(Exchange_partitions_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      raise result.o4 unless result.o4.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partitions failed: unknown result')
+    end
+
     def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
       send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
       return recv_get_partition_with_auth()
@@ -2775,6 +2794,23 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'exchange_partition', seqid)
     end
 
+    def process_exchange_partitions(seqid, iprot, oprot)
+      args = read_args(iprot, Exchange_partitions_args)
+      result = Exchange_partitions_result.new()
+      begin
+        result.success = @handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      rescue ::NoSuchObjectException => o2
+        result.o2 = o2
+      rescue ::InvalidObjectException => o3
+        result.o3 = o3
+      rescue ::InvalidInputException => o4
+        result.o4 = o4
+      end
+      write_result(result, oprot, 'exchange_partitions', seqid)
+    end
+
     def process_get_partition_with_auth(seqid, iprot, oprot)
       args = read_args(iprot, Get_partition_with_auth_args)
       result = Get_partition_with_auth_result.new()
@@ -5509,6 +5545,54 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Exchange_partitions_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    PARTITIONSPECS = 1
+    SOURCE_DB = 2
+    SOURCE_TABLE_NAME = 3
+    DEST_DB = 4
+    DEST_TABLE_NAME = 5
+
+    FIELDS = {
+      PARTITIONSPECS => {:type => ::Thrift::Types::MAP, :name => 'partitionSpecs', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+      SOURCE_DB => {:type => ::Thrift::Types::STRING, :name => 'source_db'},
+      SOURCE_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'source_table_name'},
+      DEST_DB => {:type => ::Thrift::Types::STRING, :name => 'dest_db'},
+      DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Exchange_partitions_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+    O4 = 4
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+      O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_partition_with_auth_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 2740e40..2e9afaf 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2467,6 +2467,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         String sourceDbName, String sourceTableName, String destDbName,
         String destTableName) throws MetaException, NoSuchObjectException,
         InvalidObjectException, InvalidInputException, TException {
+      exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName);
+      return new Partition();
+    }
+
+    @Override
+    public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+        String sourceDbName, String sourceTableName, String destDbName,
+        String destTableName) throws MetaException, NoSuchObjectException,
+        InvalidObjectException, InvalidInputException, TException {
       boolean success = false;
       boolean pathCreated = false;
       RawStore ms = getMS();
@@ -2501,6 +2510,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Path destPath = new Path(destinationTable.getSd().getLocation(),
           Warehouse.makePartName(partitionKeysPresent, partValsPresent));
       try {
+        List<Partition> destPartitions = new ArrayList<Partition>();
         for (Partition partition: partitionsToExchange) {
           Partition destPartition = new Partition(partition);
           destPartition.setDbName(destDbName);
@@ -2509,6 +2519,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
               Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues()));
           destPartition.getSd().setLocation(destPartitionPath.toString());
           ms.addPartition(destPartition);
+          destPartitions.add(destPartition);
           ms.dropPartition(partition.getDbName(), sourceTable.getTableName(),
             partition.getValues());
         }
@@ -2524,6 +2535,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
          */
         pathCreated = wh.renameDir(sourcePath, destPath);
         success = ms.commitTransaction();
+        return destPartitions;
       } finally {
         if (!success || !pathCreated) {
           ms.rollbackTransaction();
@@ -2532,7 +2544,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
       }
-      return new Partition();
     }
 
     private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name,

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 3960f5d..f86ec45 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -659,6 +659,22 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
         destDb, destinationTableName);
   }
 
+  /**
+   * Exchange the partitions between two tables
+   * @param partitionSpecs partition specs of the parent partition to be exchanged
+   * @param destDb the db of the destination table
+   * @param destinationTableName the destination table name
+   * @return new partitions after exchanging
+   */
+  @Override
+  public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+      String sourceDb, String sourceTable, String destDb,
+      String destinationTableName) throws MetaException,
+      NoSuchObjectException, InvalidObjectException, TException {
+    return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+        destDb, destinationTableName);
+  }
+
   @Override
   public void validatePartitionNameCharacters(List<String> partVals)
       throws TException, MetaException {

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index f3a23f5..9279cf5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -453,6 +453,22 @@ public interface IMetaStoreClient {
       InvalidObjectException, TException;
 
   /**
+   * A single partitionSpecs map can cause multiple partitions to be exchanged:
+   * e.g., for a table partitioned by year/month/day, exchanging the partition year=2015
+   * exchanges all the partitions belonging to it. This function returns the list of affected partitions.
+   * @param partitionSpecs
+   * @param sourceDb
+   * @param sourceTable
+   * @param destdb
+   * @param destTableName
+   * @return the list of the new partitions
+   */
+  List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+      String sourceDb, String sourceTable, String destdb,
+      String destTableName) throws MetaException, NoSuchObjectException,
+      InvalidObjectException, TException;
+
+  /**
    * @param dbName
    * @param tblName
    * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
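
For illustration, here is a minimal client-side sketch of driving the new API (not part of this patch; the db and table names and the ds/hr partition keys are hypothetical). Exchanging the parent spec ds=2013-04-05 on a table partitioned by ds/hr moves every matching child partition from the source table to the destination table and returns all of them:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class ExchangePartitionsExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        Map<String, String> partitionSpecs = new HashMap<String, String>();
        partitionSpecs.put("ds", "2013-04-05"); // parent spec; hr left unset on purpose
        List<Partition> moved = client.exchange_partitions(partitionSpecs,
            "default", "src_tbl", "default", "dst_tbl");
        for (Partition p : moved) {
          // one entry per moved child partition, e.g. hr=1 and hr=2
          System.out.println(p.getDbName() + "." + p.getTableName()
              + " " + p.getValues());
        }
        client.close();
      }
    }

This is the same metastore path that the ALTER TABLE ... EXCHANGE PARTITION tests further below exercise.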

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index ff86d6e..caf98b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4203,9 +4203,20 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     Map<String, String> partitionSpecs = exchangePartition.getPartitionSpecs();
     Table destTable = exchangePartition.getDestinationTable();
     Table sourceTable = exchangePartition.getSourceTable();
-    db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(),
+    List<Partition> partitions =
+        db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(),
         sourceTable.getTableName(),destTable.getDbName(),
         destTable.getTableName());
+
+    for(Partition partition : partitions) {
+      // Reuse the partition specs from dest partition since they should be the same
+      work.getOutputs().add(new WriteEntity(new Partition(sourceTable, partition.getSpec(), null),
+          WriteEntity.WriteType.DELETE));
+
+      work.getOutputs().add(new WriteEntity(new Partition(destTable, partition.getSpec(), null),
+          WriteEntity.WriteType.INSERT));
+    }
+
     return 0;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 9db740b..488d923 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2091,7 +2091,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
       List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().dropPartitions(
           dbName, tblName, partExprs, dropOptions);
-      return convertFromMetastore(tbl, tParts, null);
+      return convertFromMetastore(tbl, tParts);
     } catch (NoSuchObjectException e) {
       throw new HiveException("Partition or table doesn't exist.", e);
     } catch (Exception e) {
@@ -2335,22 +2335,20 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
     List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().listPartitionsByFilter(
         tbl.getDbName(), tbl.getTableName(), filter, (short)-1);
-    return convertFromMetastore(tbl, tParts, null);
+    return convertFromMetastore(tbl, tParts);
   }
 
   private static List<Partition> convertFromMetastore(Table tbl,
-      List<org.apache.hadoop.hive.metastore.api.Partition> src,
-      List<Partition> dest) throws HiveException {
-    if (src == null) {
-      return dest;
+      List<org.apache.hadoop.hive.metastore.api.Partition> partitions) throws HiveException {
+    if (partitions == null) {
+      return new ArrayList<Partition>();
     }
-    if (dest == null) {
-      dest = new ArrayList<Partition>(src.size());
-    }
-    for (org.apache.hadoop.hive.metastore.api.Partition tPart : src) {
-      dest.add(new Partition(tbl, tPart));
+
+    List<Partition> results = new ArrayList<Partition>(partitions.size());
+    for (org.apache.hadoop.hive.metastore.api.Partition tPart : partitions) {
+      results.add(new Partition(tbl, tPart));
     }
-    return dest;
+    return results;
   }
 
   /**
@@ -2370,7 +2368,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
     boolean hasUnknownParts = getMSC().listPartitionsByExpr(tbl.getDbName(),
         tbl.getTableName(), exprBytes, defaultPartitionName, (short)-1, msParts);
-    convertFromMetastore(tbl, msParts, result);
+    result.addAll(convertFromMetastore(tbl, msParts));
     return hasUnknownParts;
   }
 
@@ -3001,12 +2999,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
     return ShimLoader.getMajorVersion().startsWith("0.20");
   }
 
-  public void exchangeTablePartitions(Map<String, String> partitionSpecs,
+  public List<Partition> exchangeTablePartitions(Map<String, String> partitionSpecs,
       String sourceDb, String sourceTable, String destDb,
       String destinationTableName) throws HiveException {
     try {
-      getMSC().exchange_partition(partitionSpecs, sourceDb, sourceTable, destDb,
+      List<org.apache.hadoop.hive.metastore.api.Partition> partitions =
+        getMSC().exchange_partitions(partitionSpecs, sourceDb, sourceTable, destDb,
         destinationTableName);
+
+      return convertFromMetastore(getTable(destDb, destinationTableName), partitions);
     } catch (Exception ex) {
       LOG.error(StringUtils.stringifyException(ex));
       throw new HiveException(ex);

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index b4546e1..eea2fcc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import com.google.common.collect.Lists;
+
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.slf4j.Logger;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
 import org.apache.hadoop.hive.ql.index.HiveIndex;
 import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
 import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
@@ -718,6 +720,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
       alterTableExchangePartition), conf));
+
+    outputs.add(new WriteEntity(sourceTable, WriteType.DDL_SHARED));
+    outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientnegative/exchange_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/exchange_partition.q.out b/ql/src/test/results/clientnegative/exchange_partition.q.out
index 8622615..f5e332a 100644
--- a/ql/src/test/results/clientnegative/exchange_partition.q.out
+++ b/ql/src/test/results/clientnegative/exchange_partition.q.out
@@ -51,4 +51,6 @@ POSTHOOK: Input: default@ex_table2
 part=part1
 PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TABLE ex_table2
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@ex_table1
+PREHOOK: Output: default@ex_table2
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.)

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchange_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchange_partition.q.out b/ql/src/test/results/clientpositive/exchange_partition.q.out
index 5b21eaf..9316341 100644
--- a/ql/src/test/results/clientpositive/exchange_partition.q.out
+++ b/ql/src/test/results/clientpositive/exchange_partition.q.out
@@ -60,8 +60,14 @@ POSTHOOK: Input: ex2@exchange_part_test2
 ds=2013-04-05
 PREHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: ex1@exchange_part_test1
+PREHOOK: Output: ex2@exchange_part_test2
 POSTHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2
 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: ex1@exchange_part_test1
+POSTHOOK: Output: ex1@exchange_part_test1@ds=2013-04-05
+POSTHOOK: Output: ex2@exchange_part_test2
+POSTHOOK: Output: ex2@exchange_part_test2@ds=2013-04-05
 PREHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: ex1@exchange_part_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchange_partition2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchange_partition2.q.out b/ql/src/test/results/clientpositive/exchange_partition2.q.out
index 8c7c583..05121d8 100644
--- a/ql/src/test/results/clientpositive/exchange_partition2.q.out
+++ b/ql/src/test/results/clientpositive/exchange_partition2.q.out
@@ -48,8 +48,14 @@ POSTHOOK: Input: default@exchange_part_test2
 ds=2013-04-05/hr=1
 PREHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05', hr='1') WITH TABLE exchange_part_test2
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@exchange_part_test1
+PREHOOK: Output: default@exchange_part_test2
 POSTHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05', hr='1') WITH TABLE exchange_part_test2
 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@exchange_part_test1
+POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=1
+POSTHOOK: Output: default@exchange_part_test2
+POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=1
 PREHOOK: query: SHOW PARTITIONS exchange_part_test1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@exchange_part_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchange_partition3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchange_partition3.q.out b/ql/src/test/results/clientpositive/exchange_partition3.q.out
index 3815861..014be7c 100644
--- a/ql/src/test/results/clientpositive/exchange_partition3.q.out
+++ b/ql/src/test/results/clientpositive/exchange_partition3.q.out
@@ -65,9 +65,17 @@ ds=2013-04-05/hr=2
 PREHOOK: query: -- This will exchange both partitions hr=1 and hr=2
 ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@exchange_part_test1
+PREHOOK: Output: default@exchange_part_test2
 POSTHOOK: query: -- This will exchange both partitions hr=1 and hr=2
 ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2
 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@exchange_part_test1
+POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=1
+POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=2
+POSTHOOK: Output: default@exchange_part_test2
+POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=1
+POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=2
 PREHOOK: query: SHOW PARTITIONS exchange_part_test1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@exchange_part_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out b/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
index 5997d6b..3fd996a 100644
--- a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
+++ b/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
@@ -113,8 +113,14 @@ POSTHOOK: Input: default@t3@d1=1/d2=1
 100	1	1
 PREHOOK: query: ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@t1
+PREHOOK: Output: default@t2
 POSTHOOK: query: ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1
 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1@d1=1
+POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2@d1=1
 PREHOOK: query: SELECT * FROM t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -136,8 +142,14 @@ POSTHOOK: Input: default@t2@d1=1
 100	1
 PREHOOK: query: ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@t3
+PREHOOK: Output: default@t4
 POSTHOOK: query: ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3
 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3@d1=1/d2=1
+POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4@d1=1/d2=1
 PREHOOK: query: SELECT * FROM t3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t3
@@ -159,8 +171,14 @@ POSTHOOK: Input: default@t4@d1=1/d2=1
 100	1	1
 PREHOOK: query: ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@t5
+PREHOOK: Output: default@t6
 POSTHOOK: query: ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5
 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@t5
+POSTHOOK: Output: default@t5@d1=1/d2=1/d3=1
+POSTHOOK: Output: default@t6
+POSTHOOK: Output: default@t6@d1=1/d2=1/d3=1
 PREHOOK: query: SELECT * FROM t5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t5


[41/55] [abbrv] hive git commit: HIVE-12333: tez_union_with_udf.q added to wrong section in testconfiguration.properties (Jason Dere, reviewed by Chinna Lalam)

Posted by jx...@apache.org.
HIVE-12333: tez_union_with_udf.q added to wrong section in testconfiguration.properties (Jason Dere, reviewed by Chinna Lalam)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0a905624
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0a905624
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0a905624

Branch: refs/heads/master-fixed
Commit: 0a905624f1712120db8b41586248201002d5a544
Parents: 37f05f4
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed Nov 4 17:14:34 2015 -0800
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Wed Nov 4 17:14:34 2015 -0800

----------------------------------------------------------------------
 itests/src/test/resources/testconfiguration.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0a905624/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 2d1d274..d16c318 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -379,6 +379,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   tez_union2.q,\
   tez_union_dynamic_partition.q,\
   tez_union_view.q,\
+  tez_union_with_udf.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
   tez_smb_main.q,\
@@ -424,7 +425,6 @@ minillap.query.files=bucket_map_join_tez1.q,\
   tez_union_view.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
-  tez_union_with_udf.q,\
   tez_smb_main.q,\
   tez_smb_1.q,\
   vectorized_dynamic_partition_pruning.q,\


[22/55] [abbrv] hive git commit: HIVE-12202 NPE thrown when reading legacy ACID delta files (Elliot West via Eugene Koifman)

Posted by jx...@apache.org.
HIVE-12202 NPE thrown when reading legacy ACID delta files (Elliot West via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/02629e97
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/02629e97
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/02629e97

Branch: refs/heads/master-fixed
Commit: 02629e9794e228dcaa8d446423a256d75f71d6dd
Parents: 47617d3
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Tue Nov 3 09:06:19 2015 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Tue Nov 3 09:06:19 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/io/AcidInputFormat.java | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/02629e97/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
index 24506b7..7c7074d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
@@ -33,7 +33,6 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 
 /**
@@ -115,11 +114,14 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
     private List<Integer> stmtIds;
     
     public DeltaMetaData() {
-      this(0,0,null);
+      this(0,0,new ArrayList<Integer>());
     }
     DeltaMetaData(long minTxnId, long maxTxnId, List<Integer> stmtIds) {
       this.minTxnId = minTxnId;
       this.maxTxnId = maxTxnId;
+      if (stmtIds == null) {
+        throw new IllegalArgumentException("stmtIds == null");
+      }
       this.stmtIds = stmtIds;
     }
     long getMinTxnId() {
@@ -136,9 +138,6 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
       out.writeLong(minTxnId);
       out.writeLong(maxTxnId);
       out.writeInt(stmtIds.size());
-      if(stmtIds == null) {
-        return;
-      }
       for(Integer id : stmtIds) {
         out.writeInt(id);
       }
@@ -147,11 +146,8 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
     public void readFields(DataInput in) throws IOException {
       minTxnId = in.readLong();
       maxTxnId = in.readLong();
+      stmtIds.clear();
       int numStatements = in.readInt();
-      if(numStatements <= 0) {
-        return;
-      }
-      stmtIds = new ArrayList<>();
       for(int i = 0; i < numStatements; i++) {
         stmtIds.add(in.readInt());
       }
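
For illustration, a minimal round-trip sketch (not part of this patch) of the case that used to NPE: a default-constructed DeltaMetaData, i.e. a legacy delta with no statement ids, now serializes and deserializes cleanly because stmtIds is an empty list instead of null:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.hive.ql.io.AcidInputFormat;

    public class DeltaMetaDataRoundTrip {
      public static void main(String[] args) throws Exception {
        AcidInputFormat.DeltaMetaData delta = new AcidInputFormat.DeltaMetaData();
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        delta.write(new DataOutputStream(buf)); // writes stmtIds.size() == 0, no NPE
        AcidInputFormat.DeltaMetaData copy = new AcidInputFormat.DeltaMetaData();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray()))); // clears and refills stmtIds
      }
    }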


[45/55] [abbrv] hive git commit: HIVE-12156: expanding view doesn't quote reserved keyword (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)

Posted by jx...@apache.org.
HIVE-12156: expanding view doesn't quote reserved keyword (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4d2df795
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4d2df795
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4d2df795

Branch: refs/heads/master-fixed
Commit: 4d2df795c1e0ac3fe378486a6aba5fa0a3d9ffbb
Parents: 3511df7
Author: Pengcheng Xiong <px...@apache.org>
Authored: Thu Nov 5 11:26:05 2015 -0800
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Thu Nov 5 11:26:05 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  37 ++++++
 .../queries/clientpositive/struct_in_view.q     |  28 +++++
 .../results/clientpositive/struct_in_view.q.out | 118 +++++++++++++++++++
 3 files changed, 183 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4d2df795/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d2c3a7c..f3d7057 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -26,6 +26,8 @@ import java.io.Serializable;
 import java.security.AccessControlException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -155,6 +157,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
@@ -10496,8 +10499,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       return nodeOutputs;
     }
 
+    Map<ExprNodeDesc,String> nodeToText = new HashMap<>();
+    List<Entry<ASTNode, ExprNodeDesc>> fieldDescList = new ArrayList<>();
+
     for (Map.Entry<ASTNode, ExprNodeDesc> entry : nodeOutputs.entrySet()) {
       if (!(entry.getValue() instanceof ExprNodeColumnDesc)) {
+        // we need to translate the ExprNodeFieldDesc too, e.g., identifiers in
+        // struct<>.
+        if (entry.getValue() instanceof ExprNodeFieldDesc) {
+          fieldDescList.add(entry);
+        }
         continue;
       }
       ASTNode node = entry.getKey();
@@ -10513,9 +10524,35 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
       replacementText.append(".");
       replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
+      nodeToText.put(columnDesc, replacementText.toString());
       unparseTranslator.addTranslation(node, replacementText.toString());
     }
 
+    if (fieldDescList.size() != 0) {
+      // Sorting the list based on the length of fieldName
+      // For example, in Column[a].b.c and Column[a].b, Column[a].b should be
+      // unparsed before Column[a].b.c
+      Collections.sort(fieldDescList, new Comparator<Map.Entry<ASTNode, ExprNodeDesc>>() {
+        public int compare(Entry<ASTNode, ExprNodeDesc> o1, Entry<ASTNode, ExprNodeDesc> o2) {
+          ExprNodeFieldDesc fieldDescO1 = (ExprNodeFieldDesc) o1.getValue();
+          ExprNodeFieldDesc fieldDescO2 = (ExprNodeFieldDesc) o2.getValue();
+          return fieldDescO1.toString().length() < fieldDescO2.toString().length() ? -1 : 1;
+        }
+      });
+      for (Map.Entry<ASTNode, ExprNodeDesc> entry : fieldDescList) {
+        ASTNode node = entry.getKey();
+        ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) entry.getValue();
+        ExprNodeDesc exprNodeDesc = fieldDesc.getDesc();
+        String fieldName = fieldDesc.getFieldName();
+        StringBuilder replacementText = new StringBuilder();
+        replacementText.append(nodeToText.get(exprNodeDesc));
+        replacementText.append(".");
+        replacementText.append(HiveUtils.unparseIdentifier(fieldName, conf));
+        nodeToText.put(fieldDesc, replacementText.toString());
+        unparseTranslator.addTranslation(node, replacementText.toString());
+      }
+    }
+
     return nodeOutputs;
   }
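
For illustration, a standalone sketch of the ordering requirement (not part of this patch; plain strings and backquotes stand in for the real ASTNode/ExprNodeFieldDesc machinery and HiveUtils.unparseIdentifier): each field path's replacement text is looked up from its already-translated parent, so shorter paths must be processed first:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class UnparseOrderDemo {
      public static void main(String[] args) {
        List<String> paths = new ArrayList<String>();
        Collections.addAll(paths, "data.end.key", "data.end");
        // Shorter paths first, mirroring the comparator added above.
        Collections.sort(paths, new Comparator<String>() {
          public int compare(String a, String b) { return a.length() - b.length(); }
        });
        Map<String, String> nodeToText = new HashMap<String, String>();
        nodeToText.put("data", "`data`"); // the column itself is translated earlier
        for (String path : paths) {
          String parent = path.substring(0, path.lastIndexOf('.'));
          String field = path.substring(path.lastIndexOf('.') + 1);
          nodeToText.put(path, nodeToText.get(parent) + ".`" + field + "`");
        }
        System.out.println(nodeToText.get("data.end.key")); // `data`.`end`.`key`
      }
    }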
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4d2df795/ql/src/test/queries/clientpositive/struct_in_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/struct_in_view.q b/ql/src/test/queries/clientpositive/struct_in_view.q
new file mode 100644
index 0000000..d420030
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/struct_in_view.q
@@ -0,0 +1,28 @@
+drop table testreserved;
+
+create table testreserved (data struct<`end`:string, id: string>);
+
+create view testreservedview as select data.`end` as data_end, data.id as data_id from testreserved;
+
+describe extended testreservedview;
+
+select data.`end` from testreserved;
+
+drop view testreservedview;
+
+drop table testreserved;
+
+create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>);
+
+create view vs1 as select default.src.`end`.key from s;
+
+describe extended vs1;
+
+create view vs2 as select default.src.`end` from s;
+
+describe extended vs2;
+
+drop view vs1;
+
+drop view vs2;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/4d2df795/ql/src/test/results/clientpositive/struct_in_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/struct_in_view.q.out b/ql/src/test/results/clientpositive/struct_in_view.q.out
new file mode 100644
index 0000000..10b2f2e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/struct_in_view.q.out
@@ -0,0 +1,118 @@
+PREHOOK: query: drop table testreserved
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table testreserved
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table testreserved (data struct<`end`:string, id: string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testreserved
+POSTHOOK: query: create table testreserved (data struct<`end`:string, id: string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testreserved
+PREHOOK: query: create view testreservedview as select data.`end` as data_end, data.id as data_id from testreserved
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testreserved
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testreservedview
+POSTHOOK: query: create view testreservedview as select data.`end` as data_end, data.id as data_id from testreserved
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testreserved
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testreservedview
+PREHOOK: query: describe extended testreservedview
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@testreservedview
+POSTHOOK: query: describe extended testreservedview
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@testreservedview
+data_end            	string              	                    
+data_id             	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: select data.`end` from testreserved
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testreserved
+#### A masked pattern was here ####
+POSTHOOK: query: select data.`end` from testreserved
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testreserved
+#### A masked pattern was here ####
+PREHOOK: query: drop view testreservedview
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@testreservedview
+PREHOOK: Output: default@testreservedview
+POSTHOOK: query: drop view testreservedview
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@testreservedview
+POSTHOOK: Output: default@testreservedview
+PREHOOK: query: drop table testreserved
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testreserved
+PREHOOK: Output: default@testreserved
+POSTHOOK: query: drop table testreserved
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testreserved
+POSTHOOK: Output: default@testreserved
+PREHOOK: query: create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+PREHOOK: query: create view vs1 as select default.src.`end`.key from s
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@s
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vs1
+POSTHOOK: query: create view vs1 as select default.src.`end`.key from s
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@s
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vs1
+PREHOOK: query: describe extended vs1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@vs1
+POSTHOOK: query: describe extended vs1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@vs1
+key                 	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: create view vs2 as select default.src.`end` from s
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@s
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vs2
+POSTHOOK: query: create view vs2 as select default.src.`end` from s
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@s
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vs2
+PREHOOK: query: describe extended vs2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@vs2
+POSTHOOK: query: describe extended vs2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@vs2
+end                 	struct<key:string>  	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: drop view vs1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@vs1
+PREHOOK: Output: default@vs1
+POSTHOOK: query: drop view vs1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@vs1
+POSTHOOK: Output: default@vs1
+PREHOOK: query: drop view vs2
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@vs2
+PREHOOK: Output: default@vs2
+POSTHOOK: query: drop view vs2
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@vs2
+POSTHOOK: Output: default@vs2


[23/55] [abbrv] hive git commit: HIVE-12287: Lineage for lateral view shows wrong dependencies (Jimmy, reviewed by Chao)

Posted by jx...@apache.org.
HIVE-12287: Lineage for lateral view shows wrong dependencies (Jimmy, reviewed by Chao)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d5fdeed6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d5fdeed6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d5fdeed6

Branch: refs/heads/master-fixed
Commit: d5fdeed64cf36844bc3e26dff798a31e5f2643c4
Parents: 02629e9
Author: Jimmy Xiang <jx...@cloudera.com>
Authored: Wed Oct 28 14:47:45 2015 -0700
Committer: Jimmy Xiang <jx...@apache.org>
Committed: Tue Nov 3 09:35:54 2015 -0800

----------------------------------------------------------------------
 .../ql/optimizer/lineage/OpProcFactory.java     | 25 +++++---------
 ql/src/test/queries/clientpositive/lineage2.q   | 18 ++++++++++
 .../clientpositive/infer_bucket_sort.q.out      |  4 +--
 .../test/results/clientpositive/lineage2.q.out  | 30 ++++++++++++++++
 .../clientpositive/load_dyn_part15.q.out        |  6 ++--
 .../multi_insert_lateral_view.q.out             | 36 ++++++++++----------
 .../clientpositive/spark/load_dyn_part15.q.out  |  6 ++--
 .../spark/multi_insert_lateral_view.q.out       | 36 ++++++++++----------
 .../spark/union_lateralview.q.out               |  4 +--
 .../tez/vectorized_distinct_gby.q.out           |  4 +--
 .../clientpositive/union_lateralview.q.out      |  4 +--
 .../vectorized_distinct_gby.q.out               |  4 +--
 12 files changed, 109 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
index 5c5d0d6..d95b45b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
@@ -324,26 +324,19 @@ public class OpProcFactory {
       }
 
       // Dirty hack!!
-      // For the select path the columns are the ones at the end of the
+      // For the select path the columns are the ones at the beginning of the
       // current operators schema and for the udtf path the columns are
-      // at the beginning of the operator schema.
+      // at the end of the operator schema.
       ArrayList<ColumnInfo> out_cols = op.getSchema().getSignature();
       int out_cols_size = out_cols.size();
       int cols_size = cols.size();
-      if (isUdtfPath) {
-        int cnt = 0;
-        while (cnt < cols_size) {
-          lCtx.getIndex().mergeDependency(op, out_cols.get(cnt),
-              lCtx.getIndex().getDependency(inpOp, cols.get(cnt)));
-          cnt++;
-        }
-      }
-      else {
-        int cnt = cols_size - 1;
-        while (cnt >= 0) {
-          lCtx.getIndex().mergeDependency(op, out_cols.get(out_cols_size - cols_size + cnt),
-              lCtx.getIndex().getDependency(inpOp, cols.get(cnt)));
-          cnt--;
+      int outColOffset = isUdtfPath ? out_cols_size - cols_size : 0;
+      for (int cnt = 0; cnt < cols_size; cnt++) {
+        ColumnInfo outCol = out_cols.get(outColOffset + cnt);
+        if (!outCol.isHiddenVirtualCol()) {
+          ColumnInfo col = cols.get(cnt);
+          lCtx.getIndex().mergeDependency(op, outCol,
+            lCtx.getIndex().getDependency(inpOp, col));
         }
       }
       return null;
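
For illustration, the offset arithmetic above as a standalone sketch (not part of this patch; the sizes are hypothetical): in a lateral view's joined schema the select branch's columns occupy the front, so its offset is 0, while the UDTF branch's columns occupy the tail, so its offset is out_cols_size - cols_size:

    public class LateralViewOffsetDemo {
      public static void main(String[] args) {
        int outColsSize = 3;    // e.g. [identity, ep1_ids, ep1_id] after the join
        int selectColsSize = 2; // the select branch forwards the first two columns
        int udtfColsSize = 1;   // explode() produces the last column
        int selectOffset = 0;                        // !isUdtfPath
        int udtfOffset = outColsSize - udtfColsSize; // isUdtfPath
        System.out.println("select cols map to indexes [" + selectOffset + ".."
            + (selectOffset + selectColsSize - 1) + "]");
        System.out.println("udtf cols map to indexes [" + udtfOffset + ".."
            + (udtfOffset + udtfColsSize - 1) + "]");
      }
    }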

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/queries/clientpositive/lineage2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lineage2.q b/ql/src/test/queries/clientpositive/lineage2.q
index 6bcd1d7..d3fe64a 100644
--- a/ql/src/test/queries/clientpositive/lineage2.q
+++ b/ql/src/test/queries/clientpositive/lineage2.q
@@ -114,3 +114,21 @@ concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
 GROUP BY substr(src1.key,1,1);
 
+drop table if exists relations;
+create table relations (identity char(32), type string,
+  ep1_src_type string, ep1_type string, ep2_src_type string, ep2_type string,
+  ep1_ids array<string>, ep2_ids array<string>);
+
+drop table if exists rels_exploded;
+create table rels_exploded (identity char(32), type string,
+  ep1_src_type string, ep1_type string, ep2_src_type string, ep2_type string,
+  ep1_id char(32), ep2_id char(32));
+
+select identity, ep1_id from relations
+  lateral view explode(ep1_ids) nav_rel as ep1_id;
+
+insert into rels_exploded select identity, type,
+  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id
+from relations lateral view explode(ep1_ids) rel1 as ep1_id
+  lateral view explode (ep2_ids) rel2 as ep2_id;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
index 5d69e2f..1e584e0 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
@@ -978,8 +978,8 @@ SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SCRIPT []
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table PARTITION(part=1).value SCRIPT []
 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@test_table

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out
index 4184a83..0185d43 100644
--- a/ql/src/test/results/clientpositive/lineage2.q.out
+++ b/ql/src/test/results/clientpositive/lineage2.q.out
@@ -675,3 +675,33 @@ PREHOOK: Input: default@src1
 4	2	4807.0
 6	1	666.0
 9	1	998.0
+PREHOOK: query: drop table if exists relations
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table relations (identity char(32), type string,
+  ep1_src_type string, ep1_type string, ep2_src_type string, ep2_type string,
+  ep1_ids array<string>, ep2_ids array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@relations
+PREHOOK: query: drop table if exists rels_exploded
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table rels_exploded (identity char(32), type string,
+  ep1_src_type string, ep1_type string, ep2_src_type string, ep2_type string,
+  ep1_id char(32), ep2_id char(32))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@rels_exploded
+PREHOOK: query: select identity, ep1_id from relations
+  lateral view explode(ep1_ids) nav_rel as ep1_id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@relations
+#### A masked pattern was here ####
+{"version":"1.0","engine":"mr","hash":"bb30b94d13d0b35802db85b4e33230b3","queryText":"select identity, ep1_id from relations\n  lateral view explode(ep1_ids) nav_rel as ep1_id","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"nav_rel._col11","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"identity"},{"id":1,"vertexType":"COLUMN","vertexId":"ep1_id"},{"id":2,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":3,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"}]}
+PREHOOK: query: insert into rels_exploded select identity, type,
+  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id
+from relations lateral view explode(ep1_ids) rel1 as ep1_id
+  lateral view explode (ep2_ids) rel2 as ep2_id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@relations
+PREHOOK: Output: default@rels_exploded
+{"version":"1.0","engine":"mr","hash":"e76d2efade744d1d5cf74fda064ba6c6","queryText":"insert into rels_exploded select identity, type,\n  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id\nfrom relations lateral view explode(ep1_ids) rel1 as ep1_id\n  lateral view explode (ep2_ids) rel2 as ep2_id","edges":[{"sources":[8],"targets":[0],"edgeType":"PROJECTION"},{"sources":[9],"targets":[1],"edgeType":"PROJECTION"},{"sources":[10],"targets":[2],"edgeType":"PROJECTION"},{"sources":[11],"targets":[3],"edgeType":"PROJECTION"},{"sources":[12],"targets":[4],"edgeType":"PROJECTION"},{"sources":[13],"targets":[5],"edgeType":"PROJECTION"},{"sources":[14],"targets":[6],"expression":"CAST( rel1._col11 AS CHAR(32)","edgeType":"PROJECTION"},{"sources":[15],"targets":[7],"expression":"CAST( rel2._col12 AS CHAR(32)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.rels_exploded.identity"},{"id":1,"vertexType":"COLUMN","vertexId":"default.rels_explo
 ded.type"},{"id":2,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_src_type"},{"id":3,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_type"},{"id":4,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_src_type"},{"id":5,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_type"},{"id":6,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_id"},{"id":7,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_id"},{"id":8,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":9,"vertexType":"COLUMN","vertexId":"default.relations.type"},{"id":10,"vertexType":"COLUMN","vertexId":"default.relations.ep1_src_type"},{"id":11,"vertexType":"COLUMN","vertexId":"default.relations.ep1_type"},{"id":12,"vertexType":"COLUMN","vertexId":"default.relations.ep2_src_type"},{"id":13,"vertexType":"COLUMN","vertexId":"default.relations.ep2_type"},{"id":14,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"},{"id":15,"vertexType":"COLU
 MN","vertexId":"default.relations.ep2_ids"}]}

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/load_dyn_part15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part15.q.out b/ql/src/test/results/clientpositive/load_dyn_part15.q.out
index b105497..4c9cd6d 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part15.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part15.q.out
@@ -26,9 +26,9 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@load_dyn_part15_test@part_key=%7B2
 POSTHOOK: Output: default@load_dyn_part15_test@part_key=1
 POSTHOOK: Output: default@load_dyn_part15_test@part_key=3%5D
-POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=1).key SCRIPT []
-POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=3]).key SCRIPT []
-POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key={2).key SCRIPT []
+POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=3]).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key={2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: show partitions load_dyn_part15_test
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@load_dyn_part15_test

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
index 4723153..07a0f3e 100644
--- a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
@@ -285,10 +285,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.value SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from src_lv1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_lv1
@@ -548,10 +548,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from src_lv1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_lv1
@@ -800,8 +800,8 @@ POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
 POSTHOOK: Output: default@src_lv3
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
@@ -1133,10 +1133,10 @@ POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
 POSTHOOK: Output: default@src_lv3
-POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from src_lv1
@@ -1512,10 +1512,10 @@ POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
 POSTHOOK: Output: default@src_lv3
 POSTHOOK: Output: default@src_lv4
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.null, ]
 POSTHOOK: Lineage: src_lv4.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/spark/load_dyn_part15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part15.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part15.q.out
index b105497..4c9cd6d 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part15.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part15.q.out
@@ -26,9 +26,9 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@load_dyn_part15_test@part_key=%7B2
 POSTHOOK: Output: default@load_dyn_part15_test@part_key=1
 POSTHOOK: Output: default@load_dyn_part15_test@part_key=3%5D
-POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=1).key SCRIPT []
-POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=3]).key SCRIPT []
-POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key={2).key SCRIPT []
+POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key=3]).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: load_dyn_part15_test PARTITION(part_key={2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: show partitions load_dyn_part15_test
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@load_dyn_part15_test

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
index 6aec979..d000ad7 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
@@ -200,10 +200,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.value SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from src_lv1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_lv1
@@ -464,10 +464,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from src_lv1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_lv1
@@ -717,8 +717,8 @@ POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
 POSTHOOK: Output: default@src_lv3
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
@@ -1045,10 +1045,10 @@ POSTHOOK: Input: default@src_10
 POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
 POSTHOOK: Output: default@src_lv3
-POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from src_lv1
@@ -1419,10 +1419,10 @@ POSTHOOK: Output: default@src_lv1
 POSTHOOK: Output: default@src_lv2
 POSTHOOK: Output: default@src_lv3
 POSTHOOK: Output: default@src_lv4
-POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
-POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), ]
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.null, ]
 POSTHOOK: Lineage: src_lv4.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_lateralview.q.out b/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
index eaa312a..c223ac6 100644
--- a/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
@@ -242,9 +242,9 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@test_union_lateral_view
-POSTHOOK: Lineage: test_union_lateral_view.arr_ele EXPRESSION []
+POSTHOOK: Lineage: test_union_lateral_view.arr_ele SCRIPT []
 POSTHOOK: Lineage: test_union_lateral_view.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_union_lateral_view.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_union_lateral_view.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select key, arr_ele, value from test_union_lateral_view order by key, arr_ele limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_union_lateral_view

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
index da5ce13..db604f8 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
@@ -14,8 +14,8 @@ POSTHOOK: query: insert into table dtest select c,b from (select array(300,300,3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dtest
-POSTHOOK: Lineage: dtest.a SIMPLE []
-POSTHOOK: Lineage: dtest.b EXPRESSION []
+POSTHOOK: Lineage: dtest.a SCRIPT []
+POSTHOOK: Lineage: dtest.b SIMPLE []
 PREHOOK: query: explain select sum(distinct a), count(distinct a) from dtest
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select sum(distinct a), count(distinct a) from dtest

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/union_lateralview.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_lateralview.q.out b/ql/src/test/results/clientpositive/union_lateralview.q.out
index 734c1f4..7b89e50 100644
--- a/ql/src/test/results/clientpositive/union_lateralview.q.out
+++ b/ql/src/test/results/clientpositive/union_lateralview.q.out
@@ -236,9 +236,9 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@test_union_lateral_view
-POSTHOOK: Lineage: test_union_lateral_view.arr_ele EXPRESSION []
+POSTHOOK: Lineage: test_union_lateral_view.arr_ele SCRIPT []
 POSTHOOK: Lineage: test_union_lateral_view.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_union_lateral_view.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_union_lateral_view.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select key, arr_ele, value from test_union_lateral_view order by key, arr_ele limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_union_lateral_view

http://git-wip-us.apache.org/repos/asf/hive/blob/d5fdeed6/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
index b5c667f..9bc25ba 100644
--- a/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
@@ -14,8 +14,8 @@ POSTHOOK: query: insert into table dtest select c,b from (select array(300,300,3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dtest
-POSTHOOK: Lineage: dtest.a SIMPLE []
-POSTHOOK: Lineage: dtest.b EXPRESSION []
+POSTHOOK: Lineage: dtest.a SCRIPT []
+POSTHOOK: Lineage: dtest.b SIMPLE []
 PREHOOK: query: explain select sum(distinct a), count(distinct a) from dtest
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select sum(distinct a), count(distinct a) from dtest


[04/55] [abbrv] hive git commit: HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index c8f16a7..3d7cb18 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -66,6 +66,7 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) = 0;
   virtual void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) = 0;
   virtual void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) = 0;
+  virtual void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) = 0;
   virtual void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
   virtual void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0;
   virtual void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
@@ -320,6 +321,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void exchange_partition(Partition& /* _return */, const std::map<std::string, std::string> & /* partitionSpecs */, const std::string& /* source_db */, const std::string& /* source_table_name */, const std::string& /* dest_db */, const std::string& /* dest_table_name */) {
     return;
   }
+  void exchange_partitions(std::vector<Partition> & /* _return */, const std::map<std::string, std::string> & /* partitionSpecs */, const std::string& /* source_db */, const std::string& /* source_table_name */, const std::string& /* dest_db */, const std::string& /* dest_table_name */) {
+    return;
+  }
   void get_partition_with_auth(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
     return;
   }
@@ -6328,6 +6332,170 @@ class ThriftHiveMetastore_exchange_partition_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_exchange_partitions_args__isset {
+  _ThriftHiveMetastore_exchange_partitions_args__isset() : partitionSpecs(false), source_db(false), source_table_name(false), dest_db(false), dest_table_name(false) {}
+  bool partitionSpecs :1;
+  bool source_db :1;
+  bool source_table_name :1;
+  bool dest_db :1;
+  bool dest_table_name :1;
+} _ThriftHiveMetastore_exchange_partitions_args__isset;
+
+class ThriftHiveMetastore_exchange_partitions_args {
+ public:
+
+  ThriftHiveMetastore_exchange_partitions_args(const ThriftHiveMetastore_exchange_partitions_args&);
+  ThriftHiveMetastore_exchange_partitions_args& operator=(const ThriftHiveMetastore_exchange_partitions_args&);
+  ThriftHiveMetastore_exchange_partitions_args() : source_db(), source_table_name(), dest_db(), dest_table_name() {
+  }
+
+  virtual ~ThriftHiveMetastore_exchange_partitions_args() throw();
+  std::map<std::string, std::string>  partitionSpecs;
+  std::string source_db;
+  std::string source_table_name;
+  std::string dest_db;
+  std::string dest_table_name;
+
+  _ThriftHiveMetastore_exchange_partitions_args__isset __isset;
+
+  void __set_partitionSpecs(const std::map<std::string, std::string> & val);
+
+  void __set_source_db(const std::string& val);
+
+  void __set_source_table_name(const std::string& val);
+
+  void __set_dest_db(const std::string& val);
+
+  void __set_dest_table_name(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_exchange_partitions_args & rhs) const
+  {
+    if (!(partitionSpecs == rhs.partitionSpecs))
+      return false;
+    if (!(source_db == rhs.source_db))
+      return false;
+    if (!(source_table_name == rhs.source_table_name))
+      return false;
+    if (!(dest_db == rhs.dest_db))
+      return false;
+    if (!(dest_table_name == rhs.dest_table_name))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_exchange_partitions_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_exchange_partitions_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_exchange_partitions_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_exchange_partitions_pargs() throw();
+  const std::map<std::string, std::string> * partitionSpecs;
+  const std::string* source_db;
+  const std::string* source_table_name;
+  const std::string* dest_db;
+  const std::string* dest_table_name;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_exchange_partitions_result__isset {
+  _ThriftHiveMetastore_exchange_partitions_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+  bool o4 :1;
+} _ThriftHiveMetastore_exchange_partitions_result__isset;
+
+class ThriftHiveMetastore_exchange_partitions_result {
+ public:
+
+  ThriftHiveMetastore_exchange_partitions_result(const ThriftHiveMetastore_exchange_partitions_result&);
+  ThriftHiveMetastore_exchange_partitions_result& operator=(const ThriftHiveMetastore_exchange_partitions_result&);
+  ThriftHiveMetastore_exchange_partitions_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_exchange_partitions_result() throw();
+  std::vector<Partition>  success;
+  MetaException o1;
+  NoSuchObjectException o2;
+  InvalidObjectException o3;
+  InvalidInputException o4;
+
+  _ThriftHiveMetastore_exchange_partitions_result__isset __isset;
+
+  void __set_success(const std::vector<Partition> & val);
+
+  void __set_o1(const MetaException& val);
+
+  void __set_o2(const NoSuchObjectException& val);
+
+  void __set_o3(const InvalidObjectException& val);
+
+  void __set_o4(const InvalidInputException& val);
+
+  bool operator == (const ThriftHiveMetastore_exchange_partitions_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    if (!(o4 == rhs.o4))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_exchange_partitions_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_exchange_partitions_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_exchange_partitions_presult__isset {
+  _ThriftHiveMetastore_exchange_partitions_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+  bool o4 :1;
+} _ThriftHiveMetastore_exchange_partitions_presult__isset;
+
+class ThriftHiveMetastore_exchange_partitions_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_exchange_partitions_presult() throw();
+  std::vector<Partition> * success;
+  MetaException o1;
+  NoSuchObjectException o2;
+  InvalidObjectException o3;
+  InvalidInputException o4;
+
+  _ThriftHiveMetastore_exchange_partitions_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
 typedef struct _ThriftHiveMetastore_get_partition_with_auth_args__isset {
   _ThriftHiveMetastore_get_partition_with_auth_args__isset() : db_name(false), tbl_name(false), part_vals(false), user_name(false), group_names(false) {}
   bool db_name :1;
@@ -16868,6 +17036,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
   void send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
   void recv_exchange_partition(Partition& _return);
+  void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+  void send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+  void recv_exchange_partitions(std::vector<Partition> & _return);
   void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
   void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
   void recv_get_partition_with_auth(Partition& _return);
@@ -17177,6 +17348,7 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
   void process_drop_partitions_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_exchange_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_exchange_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_partition_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -17310,6 +17482,7 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
     processMap_["drop_partitions_req"] = &ThriftHiveMetastoreProcessor::process_drop_partitions_req;
     processMap_["get_partition"] = &ThriftHiveMetastoreProcessor::process_get_partition;
     processMap_["exchange_partition"] = &ThriftHiveMetastoreProcessor::process_exchange_partition;
+    processMap_["exchange_partitions"] = &ThriftHiveMetastoreProcessor::process_exchange_partitions;
     processMap_["get_partition_with_auth"] = &ThriftHiveMetastoreProcessor::process_get_partition_with_auth;
     processMap_["get_partition_by_name"] = &ThriftHiveMetastoreProcessor::process_get_partition_by_name;
     processMap_["get_partitions"] = &ThriftHiveMetastoreProcessor::process_get_partitions;
@@ -17849,6 +18022,16 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     return;
   }
 
+  void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->exchange_partitions(_return, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
+    }
+    ifaces_[i]->exchange_partitions(_return, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
+    return;
+  }
+
   void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -18815,6 +18998,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
   int32_t send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
   void recv_exchange_partition(Partition& _return, const int32_t seqid);
+  void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+  int32_t send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
+  void recv_exchange_partitions(std::vector<Partition> & _return, const int32_t seqid);
   void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
   int32_t send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
   void recv_get_partition_with_auth(Partition& _return, const int32_t seqid);

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 9eca65c..a395729 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -242,6 +242,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("exchange_partition\n");
   }
 
+  void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) {
+    // Your implementation goes here
+    printf("exchange_partitions\n");
+  }
+
   void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) {
     // Your implementation goes here
     printf("get_partition_with_auth\n");


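For readers wiring this up from application code, here is a minimal sketch of calling the new exchange_partitions method through the Thrift bindings generated from the same IDL, on the Java side. The host, port, and partition spec values are hypothetical placeholders, and error handling is omitted; treat it as a sketch, not the canonical client path.

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class ExchangePartitionsSketch {
  public static void main(String[] args) throws Exception {
    // 9083 is the conventional metastore Thrift port; adjust as needed.
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // Hypothetical spec: move the partition(s) matching ds=2015-11-06.
    Map<String, String> partitionSpecs =
        Collections.singletonMap("ds", "2015-11-06");
    List<Partition> moved = client.exchange_partitions(
        partitionSpecs, "source_db", "source_table", "dest_db", "dest_table");
    System.out.println("Exchanged " + moved.size() + " partition(s)");

    transport.close();
  }
}

Unlike the singular exchange_partition, the plural form returns every partition that was moved, which lines up with this commit's goal of surfacing the affected partitions to the pre/post execute hooks.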
[14/55] [abbrv] hive git commit: HIVE-11293 HiveConnection.setAutoCommit(true) throws exception (Michał Węgrzyn and Alan Gates, reviewed by Thejas Nair)

Posted by jx...@apache.org.
HIVE-11293 HiveConnection.setAutoCommit(true) throws exception (Michał Węgrzyn and Alan Gates, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1357f633
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1357f633
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1357f633

Branch: refs/heads/master-fixed
Commit: 1357f6338796600fe37b81bb11600ad56da3d4e2
Parents: 1ad1dc8
Author: Alan Gates <ga...@hortonworks.com>
Authored: Mon Nov 2 15:53:07 2015 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Nov 2 15:53:07 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   | 89 ++++++++++++------
 .../org/apache/hive/jdbc/HiveConnection.java    | 96 +++++++++++---------
 2 files changed, 113 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1357f633/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 995a33d..ced454f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -18,14 +18,28 @@
 
 package org.apache.hive.jdbc;
 
-import static org.apache.hadoop.hive.conf.SystemVariables.SET_COLUMN_NAME;
-import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.processors.DfsProcessor;
+import org.apache.hive.common.util.HiveVersionInfo;
+import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
+import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
+import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes;
+import org.apache.hive.service.cli.operation.HiveTableTypeMapping;
+import org.apache.hive.service.cli.operation.TableTypeMappingFactory.TableTypeMappings;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.InputStream;
 import java.sql.Connection;
@@ -36,6 +50,7 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
+import java.sql.SQLWarning;
 import java.sql.Statement;
 import java.sql.Timestamp;
 import java.sql.Types;
@@ -49,26 +64,14 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.regex.Pattern;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
-import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.ql.processors.DfsProcessor;
-import org.apache.hive.common.util.HiveVersionInfo;
-import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
-import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
-import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes;
-import org.apache.hive.service.cli.operation.HiveTableTypeMapping;
-import org.apache.hive.service.cli.operation.TableTypeMappingFactory.TableTypeMappings;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import static org.apache.hadoop.hive.conf.SystemVariables.SET_COLUMN_NAME;
+import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 
 /**
@@ -96,6 +99,8 @@ public class TestJdbcDriver2 {
   private static boolean standAloneServer = false;
   private static final float floatCompareDelta = 0.0001f;
 
+  @Rule public ExpectedException thrown = ExpectedException.none();
+
   public TestJdbcDriver2() {
     conf = new HiveConf(TestJdbcDriver2.class);
     dataFileDir = conf.get("test.data.files").replace('\\', '/')
@@ -2414,4 +2419,32 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
       }
     }
   }
+
+  @Test
+  public void testAutoCommit() throws Exception {
+    con.clearWarnings();
+    con.setAutoCommit(true);
+    assertNull(con.getWarnings());
+    con.setAutoCommit(false);
+    SQLWarning warning = con.getWarnings();
+    assertNotNull(warning);
+    assertEquals("Hive does not support autoCommit=false", warning.getMessage());
+    assertNull(warning.getNextWarning());
+    con.clearWarnings();
+  }
+
+  @Test
+  public void setAutoCommitOnClosedConnection() throws Exception {
+    Connection mycon = getConnection("");
+    try {
+      mycon.setAutoCommit(true);
+      mycon.close();
+      thrown.expect(SQLException.class);
+      thrown.expectMessage("Connection is closed");
+      mycon.setAutoCommit(true);
+    } finally {
+      mycon.close();
+    }
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1357f633/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 920d50f..e38c585 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -18,48 +18,6 @@
 
 package org.apache.hive.jdbc;
 
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.security.KeyStore;
-import java.security.SecureRandom;
-import java.sql.Array;
-import java.sql.Blob;
-import java.sql.CallableStatement;
-import java.sql.Clob;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.DriverManager;
-import java.sql.NClob;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLClientInfoException;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.sql.SQLXML;
-import java.sql.Savepoint;
-import java.sql.Statement;
-import java.sql.Struct;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.concurrent.Executor;
-import java.util.concurrent.TimeUnit;
-
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.TrustManagerFactory;
-import javax.security.sasl.Sasl;
-import javax.security.sasl.SaslException;
-
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.protocol.HttpContext;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
 import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.auth.KerberosSaslHelper;
@@ -87,9 +45,11 @@ import org.apache.http.config.RegistryBuilder;
 import org.apache.http.conn.socket.ConnectionSocketFactory;
 import org.apache.http.conn.ssl.SSLSocketFactory;
 import org.apache.http.impl.client.BasicCookieStore;
+import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
+import org.apache.http.protocol.HttpContext;
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.transport.THttpClient;
@@ -98,6 +58,45 @@ import org.apache.thrift.transport.TTransportException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslException;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.security.KeyStore;
+import java.security.SecureRandom;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+
 /**
  * HiveConnection.
  *
@@ -1216,8 +1215,17 @@ public class HiveConnection implements java.sql.Connection {
 
   @Override
   public void setAutoCommit(boolean autoCommit) throws SQLException {
-    if (autoCommit) {
-      throw new SQLException("enabling autocommit is not supported");
+    // Per JDBC spec, if the connection is closed a SQLException should be thrown.
+    if(isClosed) {
+      throw new SQLException("Connection is closed");
+    }
+    // The auto-commit mode is always enabled for this connection. Per JDBC spec,
+    // if setAutoCommit is called and the auto-commit mode is not changed, the call is a no-op.
+    if (!autoCommit) {
+      LOG.warn("Request to set autoCommit to false; Hive does not support autoCommit=false.");
+      SQLWarning warning = new SQLWarning("Hive does not support autoCommit=false");
+      if (warningChain == null) warningChain = warning;
+      else warningChain.setNextWarning(warning);
     }
   }
 


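The new contract is easiest to see from the client side, mirroring the tests above: setAutoCommit(true) is a no-op, setAutoCommit(false) queues a SQLWarning instead of throwing, and either call on a closed connection raises SQLException. A minimal sketch follows; the JDBC URL is a hypothetical placeholder.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLWarning;

public class AutoCommitSketch {
  public static void main(String[] args) throws Exception {
    // Explicit driver load, for environments without JDBC 4 auto-discovery.
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    Connection con =
        DriverManager.getConnection("jdbc:hive2://localhost:10000/default");

    con.setAutoCommit(true);   // no-op: auto-commit is always on
    con.setAutoCommit(false);  // unsupported: warns instead of throwing
    SQLWarning warning = con.getWarnings();
    System.out.println(warning.getMessage()); // "Hive does not support autoCommit=false"
    con.clearWarnings();

    con.close();
    con.setAutoCommit(true);   // throws SQLException("Connection is closed")
  }
}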
[55/55] [abbrv] hive git commit: HIVE-11726: Pushed IN predicates to the metastore (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by jx...@apache.org.
HIVE-11726: Pushed IN predicates to the metastore (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e8076ef4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e8076ef4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e8076ef4

Branch: refs/heads/master-fixed
Commit: e8076ef41de842b9adceea4f854a92d9d1d1388b
Parents: 898834e
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Nov 3 18:32:14 2015 +0200
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Nov 6 17:57:12 2015 +0100

----------------------------------------------------------------------
 .../hadoop/hive/metastore/parser/Filter.g       | 218 ++++++++
 .../test/queries/clientpositive/pointlookup4.q  |  27 +
 .../results/clientpositive/pointlookup4.q.out   | 530 +++++++++++++++++++
 3 files changed, 775 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e8076ef4/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
index 8aef5bf..81111a0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
@@ -122,6 +122,10 @@ operatorExpression
     :
     betweenExpression
     |
+    inExpression
+    |
+    multiColInExpression
+    |
     binOpExpression
     ;
 
@@ -203,16 +207,229 @@ betweenExpression
         tree.addIntermediateNode(isPositive ? LogicalOperator.AND : LogicalOperator.OR);
     };
 
+inExpression
+@init {
+    List constants = new ArrayList();
+    Object constantV = null;
+    boolean isPositive = true;
+}
+    :
+    (
+        LPAREN key = Identifier RPAREN ( KW_NOT { isPositive = false; } )? IN LPAREN
+        (
+            (
+                constant = DateLiteral
+                {
+                    constantV = FilterLexer.ExtractDate(constant.getText());
+                    constants.add(constantV);
+                }
+                (
+                    COMMA constant = DateLiteral
+                    {
+                        constantV = FilterLexer.ExtractDate(constant.getText());
+                        constants.add(constantV);
+                    }
+                )*
+            )
+            |
+            (
+                constant = StringLiteral
+                {
+                    constantV = TrimQuotes(constant.getText());
+                    constants.add(constantV);
+                }
+                (
+                    COMMA constant = StringLiteral
+                    {
+                        constantV = TrimQuotes(constant.getText());
+                        constants.add(constantV);
+                    }
+                )*
+            )
+            |
+            (
+                constant = IntegralLiteral
+                {
+                    constantV = Long.parseLong(constant.getText());
+                    constants.add(constantV);
+                }
+                (
+                    COMMA constant = IntegralLiteral
+                    {
+                        constantV = Long.parseLong(constant.getText());
+                        constants.add(constantV);
+                    }
+                )*
+            )
+        ) RPAREN
+    )
+    {
+        for (int i = 0; i < constants.size(); i++) {
+            Object value = constants.get(i);
+            LeafNode leaf = new LeafNode();
+            leaf.keyName = key.getText();
+            leaf.value = value;
+            leaf.operator = isPositive ? Operator.EQUALS : Operator.NOTEQUALS2;
+            tree.addLeafNode(leaf);
+            if (i != 0) {
+                tree.addIntermediateNode(isPositive ? LogicalOperator.OR : LogicalOperator.AND);
+            }
+        }
+    };
+
+multiColInExpression
+@init {
+    List<String> keyNames = new ArrayList<String>();
+    List constants = new ArrayList();
+    List partialConstants;
+    String keyV = null;
+    Object constantV = null;
+    boolean isPositive = true;
+}
+    :
+    (
+        LPAREN
+        (
+            KW_STRUCT LPAREN key = Identifier
+            {
+                keyV = key.getText();
+                keyNames.add(keyV);
+            }
+            (
+                COMMA key = Identifier
+                {
+                    keyV = key.getText();
+                    keyNames.add(keyV);
+                }
+            )* RPAREN
+        ) RPAREN ( KW_NOT { isPositive = false; } )? IN LPAREN KW_CONST KW_STRUCT LPAREN
+        {
+            partialConstants = new ArrayList();
+        }
+        (
+            constant = DateLiteral
+            {
+                constantV = FilterLexer.ExtractDate(constant.getText());
+                partialConstants.add(constantV);
+            }
+            | constant = StringLiteral
+            {
+                constantV = TrimQuotes(constant.getText());
+                partialConstants.add(constantV);
+            }
+            | constant = IntegralLiteral
+            {
+                constantV = Long.parseLong(constant.getText());
+                partialConstants.add(constantV);
+            }
+        )
+        (
+            COMMA
+            (
+                constant = DateLiteral
+                {
+                    constantV = FilterLexer.ExtractDate(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = StringLiteral
+                {
+                    constantV = TrimQuotes(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = IntegralLiteral
+                {
+                    constantV = Long.parseLong(constant.getText());
+                    partialConstants.add(constantV);
+                }
+            )
+        )*
+        {
+            constants.add(partialConstants);
+        }
+        RPAREN
+        (
+            COMMA KW_CONST KW_STRUCT LPAREN
+            {
+                partialConstants = new ArrayList();
+            }
+            (
+                constant = DateLiteral
+                {
+                    constantV = FilterLexer.ExtractDate(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = StringLiteral
+                {
+                    constantV = TrimQuotes(constant.getText());
+                    partialConstants.add(constantV);
+                }
+                | constant = IntegralLiteral
+                {
+                    constantV = Long.parseLong(constant.getText());
+                    partialConstants.add(constantV);
+                }
+            )
+            (
+                COMMA
+                (
+                    constant = DateLiteral
+                    {
+                        constantV = FilterLexer.ExtractDate(constant.getText());
+                        partialConstants.add(constantV);
+                    }
+                    | constant = StringLiteral
+                    {
+                        constantV = TrimQuotes(constant.getText());
+                        partialConstants.add(constantV);
+                    }
+                    | constant = IntegralLiteral
+                    {
+                        constantV = Long.parseLong(constant.getText());
+                        partialConstants.add(constantV);
+                    }
+                )
+            )*
+            {
+                constants.add(partialConstants);
+            }
+            RPAREN
+        )* RPAREN
+    )
+    {
+        for (int i = 0; i < constants.size(); i++) {
+            List list = (List) constants.get(i);
+            assert keyNames.size() == list.size();
+            for (int j=0; j < list.size(); j++) {
+                String keyName = keyNames.get(j);
+                Object value = list.get(j);
+                LeafNode leaf = new LeafNode();
+                leaf.keyName = keyName;
+                leaf.value = value;
+                leaf.operator = isPositive ? Operator.EQUALS : Operator.NOTEQUALS2;
+                tree.addLeafNode(leaf);
+                if (j != 0) {
+                    tree.addIntermediateNode(isPositive ? LogicalOperator.AND : LogicalOperator.OR);
+                }
+            }
+            if (i != 0) {
+                tree.addIntermediateNode(isPositive ? LogicalOperator.OR : LogicalOperator.AND);
+            }
+        }
+    };
+
 // Keywords
 KW_NOT : 'NOT';
 KW_AND : 'AND';
 KW_OR : 'OR';
 KW_LIKE : 'LIKE';
 KW_DATE : 'date';
+KW_CONST : 'CONST';
+KW_STRUCT : 'STRUCT';
 
 // Operators
 LPAREN : '(' ;
 RPAREN : ')' ;
+COMMA : ',' ;
 EQUAL : '=';
 NOTEQUAL : '<>' | '!=';
 LESSTHANOREQUALTO : '<=';
@@ -220,6 +437,7 @@ LESSTHAN : '<';
 GREATERTHANOREQUALTO : '>=';
 GREATERTHAN : '>';
 BETWEEN : 'BETWEEN';
+IN : 'IN';
 
 // LITERALS
 fragment

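The action code in inExpression is easiest to follow as a loop: each IN constant becomes an EQUALS leaf (NOTEQUALS2 for NOT IN), and from the second constant on, the leaf is folded into the expression tree with OR (AND for NOT IN). The sketch below traces that expansion; the enum and method shapes are hypothetical stand-ins for the metastore parser's LeafNode/ExpressionTree types, printed rather than built into a real tree.

import java.util.Arrays;
import java.util.List;

public class InExpansionSketch {
  enum Operator { EQUALS, NOTEQUALS2 }
  enum LogicalOperator { AND, OR }

  static void expandIn(String key, List<?> constants, boolean isPositive) {
    for (int i = 0; i < constants.size(); i++) {
      // One leaf per IN constant, as in the grammar's action block.
      System.out.println(key + " "
          + (isPositive ? Operator.EQUALS : Operator.NOTEQUALS2)
          + " " + constants.get(i));
      if (i != 0) {
        // Fold the new leaf into the tree: OR for IN, AND for NOT IN.
        System.out.println("  combined with "
            + (isPositive ? LogicalOperator.OR : LogicalOperator.AND));
      }
    }
  }

  public static void main(String[] args) {
    // (ds1) IN ('2000-04-08', '2000-04-09')  ->  ds1 = ... OR ds1 = ...
    expandIn("ds1", Arrays.asList("2000-04-08", "2000-04-09"), true);
  }
}

The multi-column variant applies the same fold twice: AND across the columns of one CONST STRUCT, then OR across the structs (with the operators inverted for NOT IN).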
http://git-wip-us.apache.org/repos/asf/hive/blob/e8076ef4/ql/src/test/queries/clientpositive/pointlookup4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pointlookup4.q b/ql/src/test/queries/clientpositive/pointlookup4.q
new file mode 100644
index 0000000..e0bf5a6
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/pointlookup4.q
@@ -0,0 +1,27 @@
+drop table pcr_t1;
+
+create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string);
+insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
+
+set hive.optimize.point.lookup=false;
+set hive.optimize.partition.columns.separate=false;
+
+explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2;
+
+set hive.optimize.point.lookup=true;
+set hive.optimize.point.lookup.min=0;
+set hive.optimize.partition.columns.separate=true;
+
+explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2;
+
+drop table pcr_t1;

http://git-wip-us.apache.org/repos/asf/hive/blob/e8076ef4/ql/src/test/results/clientpositive/pointlookup4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup4.q.out b/ql/src/test/results/clientpositive/pointlookup4.q.out
new file mode 100644
index 0000000..157aea6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/pointlookup4.q.out
@@ -0,0 +1,530 @@
+PREHOOK: query: drop table pcr_t1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcr_t1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@pcr_t1
+POSTHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@pcr_t1
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcr_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               key
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               value
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds2
+      TOK_WHERE
+         or
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-08'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-08'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  1
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-09'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-09'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  2
+      TOK_ORDERBY
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               key
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               value
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcr_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (((ds1 = '2000-04-08') and (ds2 = '2001-04-08') and (key = 1)) or ((ds1 = '2000-04-09') and (ds2 = '2001-04-09') and (key = 2))) (type: boolean)
+              Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  sort order: ++++
+                  Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                  tag: -1
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-08
+              ds2 2001-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-09
+              ds2 2001-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+      Truncated Path -> Alias:
+        /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1]
+        /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2,_col3
+                  columns.types int:string:string:string
+                  escape.delim \
+                  hive.serialization.extend.additional.nesting.levels true
+                  serialization.escape.crlf true
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
+order by key, value, ds1, ds2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcr_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               key
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               value
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds2
+      TOK_WHERE
+         or
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-08'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-08'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  1
+            and
+               and
+                  =
+                     TOK_TABLE_OR_COL
+                        ds1
+                     '2000-04-09'
+                  =
+                     TOK_TABLE_OR_COL
+                        ds2
+                     '2001-04-09'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  2
+      TOK_ORDERBY
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               key
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               value
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds1
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcr_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(ds1,key,ds2)) IN (const struct('2000-04-08',1,'2001-04-08'), const struct('2000-04-09',2,'2001-04-09')) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  sort order: ++++
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                  tag: -1
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-08
+              ds2 2001-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds2=2001-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds1 2000-04-09
+              ds2 2001-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcr_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds1/ds2
+              partition_columns.types string:string
+              rawDataSize 160
+              serialization.ddl struct pcr_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcr_t1
+                partition_columns ds1/ds2
+                partition_columns.types string:string
+                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcr_t1
+            name: default.pcr_t1
+      Truncated Path -> Alias:
+        /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1]
+        /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+            Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2,_col3
+                  columns.types int:string:string:string
+                  escape.delim \
+                  hive.serialization.extend.additional.nesting.levels true
+                  serialization.escape.crlf true
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: drop table pcr_t1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@pcr_t1
+PREHOOK: Output: default@pcr_t1
+POSTHOOK: query: drop table pcr_t1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@pcr_t1
+POSTHOOK: Output: default@pcr_t1
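
A note on the two plans above: the first EXPLAIN keeps the WHERE clause as an OR of per-partition conjunctions, while the second shows the same predicate collapsed into a single IN over struct(ds1, key, ds2), with the filter's row estimate dropping from 40 to 20. A minimal, self-contained Java sketch of that tuple-membership idea follows — hypothetical names, not Hive's optimizer code:

import java.util.List;

public class StructInSketch {
  public static void main(String[] args) {
    // The two constant structs from the rewritten predicate.
    List<List<Object>> wanted = List.of(
        List.of("2000-04-08", 1, "2001-04-08"),
        List.of("2000-04-09", 2, "2001-04-09"));

    // struct(ds1, key, ds2) built from one input row.
    List<Object> row = List.of("2000-04-09", 2, "2001-04-09");

    // The IN predicate is plain tuple membership; List.equals compares element-wise.
    System.out.println(wanted.contains(row)); // true
  }
}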


[43/55] [abbrv] hive git commit: HIVE-12304 "drop database cascade" needs to unregister functions

Posted by jx...@apache.org.
HIVE-12304 "drop database cascade" needs to unregister functions


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dff25380
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dff25380
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dff25380

Branch: refs/heads/master-fixed
Commit: dff25380012be0782f78548f049ce645e3969076
Parents: 0f716f1
Author: aihuaxu <ai...@apache.org>
Authored: Fri Oct 30 13:31:08 2015 -0400
Committer: aihuaxu <ai...@apache.org>
Committed: Thu Nov 5 11:34:40 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  7 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |  9 +++
 .../apache/hadoop/hive/ql/exec/Registry.java    | 12 +++
 .../clientnegative/drop_database_cascade.q      | 26 ++++++
 .../clientnegative/drop_database_cascade.q.out  | 85 ++++++++++++++++++++
 5 files changed, 138 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/dff25380/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index caf98b5..9ab3e98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3731,7 +3731,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int dropDatabase(Hive db, DropDatabaseDesc dropDb)
       throws HiveException {
     try {
-      db.dropDatabase(dropDb.getDatabaseName(), true, dropDb.getIfExists(), dropDb.isCasdade());
+      String dbName = dropDb.getDatabaseName();
+      db.dropDatabase(dbName, true, dropDb.getIfExists(), dropDb.isCasdade());
+      // Unregister the functions as well
+      if (dropDb.isCasdade()) {
+        FunctionRegistry.unregisterPermanentFunctions(dbName);
+      }
     }
     catch (NoSuchObjectException ex) {
       throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, dropDb.getDatabaseName());

http://git-wip-us.apache.org/repos/asf/hive/blob/dff25380/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index de8e98c..2196ca9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -1571,6 +1571,15 @@ public final class FunctionRegistry {
     unregisterTemporaryUDF(functionName);
   }
 
+  /**
+   * Unregisters all the functions under the database dbName
+   * @param dbName specified database name
+   * @throws HiveException
+   */
+  public static void unregisterPermanentFunctions(String dbName) throws HiveException {
+    system.unregisterFunctions(dbName);
+  }
+
   private FunctionRegistry() {
     // prevent instantiation
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/dff25380/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index 1121819..ea9813c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -419,6 +419,18 @@ public class Registry {
     }
   }
 
+  /**
+   * Unregisters all the functions belonging to the specified database
+   * @param dbName database name
+   * @throws HiveException
+   */
+  public synchronized void unregisterFunctions(String dbName) throws HiveException {
+    Set<String> funcNames = getFunctionNames(dbName.toLowerCase() + "\\..*");
+    for (String funcName : funcNames) {
+      unregisterFunction(funcName);
+    }
+  }
+
   public GenericUDAFResolver getGenericUDAFResolver(String functionName) throws SemanticException {
     FunctionInfo info = getFunctionInfo(functionName);
     if (info != null) {
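
The new Registry.unregisterFunctions above selects its victims by regex: it lowercases the database name and matches every registered name of the form "<db>.<function>". A standalone sketch of just that filtering step, using a plain map in place of Hive's registry state (hypothetical names, illustration only):

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class UnregisterByDbSketch {
  public static void main(String[] args) {
    // Stand-in for the registry's qualified-name -> function map.
    Map<String, String> functions = new HashMap<>();
    functions.put("test_database.test_func", "org.apache.hadoop.hive.ql.udf.UDFAscii");
    functions.put("default.test_func", "org.apache.hadoop.hive.ql.udf.UDFAscii");

    // Same pattern shape as the patch: dbName.toLowerCase() + "\\..*"
    String dbName = "TEST_database";
    Pattern p = Pattern.compile(dbName.toLowerCase() + "\\..*");

    // Drop every function qualified by the dropped database.
    functions.keySet().removeIf(name -> p.matcher(name).matches());

    System.out.println(functions.keySet()); // [default.test_func]
  }
}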

http://git-wip-us.apache.org/repos/asf/hive/blob/dff25380/ql/src/test/queries/clientnegative/drop_database_cascade.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_database_cascade.q b/ql/src/test/queries/clientnegative/drop_database_cascade.q
new file mode 100644
index 0000000..d544692
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/drop_database_cascade.q
@@ -0,0 +1,26 @@
+-- This test verifies that the functions and tables are unregistered when the database is dropped,
+-- and that other databases are not affected
+
+CREATE DATABASE TEST_database;
+
+USE TEST_database;
+
+CREATE TABLE test_table (key STRING, value STRING);
+
+CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+
+USE default;
+
+CREATE TABLE test_table (key STRING, value STRING);
+
+CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+
+DROP DATABASE TEST_database CASCADE;
+
+describe test_table;
+
+describe function test_func;
+
+describe function TEST_database.test_func;
+
+describe TEST_database.test_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/dff25380/ql/src/test/results/clientnegative/drop_database_cascade.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_database_cascade.q.out b/ql/src/test/results/clientnegative/drop_database_cascade.q.out
new file mode 100644
index 0000000..304b967
--- /dev/null
+++ b/ql/src/test/results/clientnegative/drop_database_cascade.q.out
@@ -0,0 +1,85 @@
+PREHOOK: query: -- This test verifies that the functions and tables are unregistered when the database is dropped,
+-- and that other databases are not affected
+
+CREATE DATABASE TEST_database
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:TEST_database
+POSTHOOK: query: -- This test verifies that the functions and tables are unregistered when the database is dropped,
+-- and that other databases are not affected
+
+CREATE DATABASE TEST_database
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:TEST_database
+PREHOOK: query: USE TEST_database
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:test_database
+POSTHOOK: query: USE TEST_database
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:test_database
+PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: TEST_database@test_table
+PREHOOK: Output: database:test_database
+POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: TEST_database@test_table
+POSTHOOK: Output: database:test_database
+PREHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: database:test_database
+PREHOOK: Output: test_database.test_func
+POSTHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: database:test_database
+POSTHOOK: Output: test_database.test_func
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table
+POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table
+PREHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: database:default
+PREHOOK: Output: default.test_func
+POSTHOOK: query: CREATE FUNCTION test_func as 'org.apache.hadoop.hive.ql.udf.UDFAscii'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default.test_func
+PREHOOK: query: DROP DATABASE TEST_database CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:test_database
+PREHOOK: Output: database:test_database
+PREHOOK: Output: test_database@test_table
+POSTHOOK: query: DROP DATABASE TEST_database CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:test_database
+POSTHOOK: Output: database:test_database
+POSTHOOK: Output: test_database@test_table
+PREHOOK: query: describe test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+key                 	string              	                    
+value               	string              	                    
+PREHOOK: query: describe function test_func
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: describe function test_func
+POSTHOOK: type: DESCFUNCTION
+test_func(str) - returns the numeric value of the first character of str
+PREHOOK: query: describe function TEST_database.test_func
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: describe function TEST_database.test_func
+POSTHOOK: type: DESCFUNCTION
+Function 'TEST_database.test_func' does not exist.
+FAILED: SemanticException [Error 10001]: Table not found TEST_database.test_table


[32/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by jx...@apache.org.
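
The expected-output changes in this commit all follow one rule: decimal values are rendered padded with trailing zeros out to the column's declared scale, so with a scale of 3 (assumed here from the 0.220 / 11.220 output below) 0.22 becomes 0.220. A minimal sketch of that formatting rule with java.math.BigDecimal — illustrative only, not the HiveDecimal code path:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class PadToScaleSketch {
  public static void main(String[] args) {
    int columnScale = 3; // assumed from the padded output below

    // Raising the scale pads with trailing zeros; UNNECESSARY never rounds here.
    BigDecimal d1 = new BigDecimal("0.22").setScale(columnScale, RoundingMode.UNNECESSARY);
    BigDecimal d2 = new BigDecimal("11.22").setScale(columnScale, RoundingMode.UNNECESSARY);

    System.out.println(d1.toPlainString()); // 0.220
    System.out.println(d2.toPlainString()); // 11.220
  }
}
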
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
index 7c17733..a30820e 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where d=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where d=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where d='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where d='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d!=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,11 +106,11 @@ POSTHOOK: query: select * from newtypestbl where d!=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -119,11 +119,11 @@ POSTHOOK: query: select * from newtypestbl where d!=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -132,11 +132,11 @@ POSTHOOK: query: select * from newtypestbl where d!='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -145,11 +145,11 @@ POSTHOOK: query: select * from newtypestbl where d!='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,11 +171,11 @@ POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -184,11 +184,11 @@ POSTHOOK: query: select * from newtypestbl where d<11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -197,11 +197,11 @@ POSTHOOK: query: select * from newtypestbl where d<11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<'11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -210,11 +210,11 @@ POSTHOOK: query: select * from newtypestbl where d<'11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<'11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -223,11 +223,11 @@ POSTHOOK: query: select * from newtypestbl where d<'11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,11 +249,11 @@ POSTHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -262,11 +262,11 @@ POSTHOOK: query: select * from newtypestbl where d<1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -275,11 +275,11 @@ POSTHOOK: query: select * from newtypestbl where d<1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -288,16 +288,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -306,16 +306,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -324,16 +324,16 @@ POSTHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -342,16 +342,16 @@ POSTHOOK: query: select * from newtypestbl where d<='11.22' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -360,16 +360,16 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -378,16 +378,16 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as float) sort
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -396,11 +396,11 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -409,11 +409,11 @@ POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -422,16 +422,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -440,16 +440,16 @@ POSTHOOK: query: select * from newtypestbl where d<=11.22BD sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=12 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -458,16 +458,16 @@ POSTHOOK: query: select * from newtypestbl where d<=12 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d<=12 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -476,16 +476,16 @@ POSTHOOK: query: select * from newtypestbl where d<=12 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -494,11 +494,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -507,11 +507,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -520,16 +520,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -538,16 +538,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.22', '11.22') sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.9', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -572,11 +572,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -585,11 +585,11 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -598,16 +598,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -616,16 +616,16 @@ POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 0 and 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -634,11 +634,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -647,11 +647,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -660,16 +660,16 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -678,16 +678,16 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and 1000 sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -696,11 +696,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -709,11 +709,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -722,11 +722,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -735,11 +735,11 @@ POSTHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -748,11 +748,11 @@ POSTHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -761,8 +761,8 @@ POSTHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
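
All of the hunks in this q-file follow one pattern: decimal values are now rendered with their full declared scale, so trailing zeros are kept (0.22 becomes 0.220, 11.22 becomes 11.220). A minimal sketch of the behavior, assuming d is declared decimal(5,3) as the three fractional digits above suggest (the demo table name is illustrative):

  -- assumption: d is decimal(5,3); the demo table name is hypothetical
  CREATE TABLE newtypestbl_demo (c char(10), v varchar(10), d decimal(5,3)) STORED AS PARQUET;
  INSERT INTO newtypestbl_demo VALUES ('apple', 'bee', 0.22), ('hello', 'world', 11.22);
  SELECT d FROM newtypestbl_demo;
  -- new output: 0.220 and 11.220 (padded to scale 3); old output: 0.22 and 11.22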

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
index e314c10..3b3e5b7 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,11 +106,11 @@ POSTHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -119,11 +119,11 @@ POSTHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -132,11 +132,11 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -145,11 +145,11 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,16 +158,16 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -176,16 +176,16 @@ POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -210,11 +210,11 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -223,11 +223,11 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,16 +236,16 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -254,16 +254,16 @@ POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -272,11 +272,11 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -285,11 +285,11 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -298,16 +298,16 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -316,16 +316,16 @@ POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -350,11 +350,11 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -363,11 +363,11 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -376,16 +376,16 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -394,16 +394,16 @@ POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-apple     	bee	0.22	2011-01-01 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
-hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+apple     	bee	0.220	2011-01-01 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
+hello     	world	11.220	2011-01-20 01:01:01
 PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
index 2e9f72f..5a62e80 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where v="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where v="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v!="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where v!="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v!="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where v!="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v<"world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where v<"world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v<"world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where v<"world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v<="world" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,16 +106,16 @@ POSTHOOK: query: select * from newtypestbl where v<="world" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v<="world" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -124,16 +124,16 @@ POSTHOOK: query: select * from newtypestbl where v<="world" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v="bee   "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,11 +171,11 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -184,16 +184,16 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -202,16 +202,16 @@ POSTHOOK: query: select * from newtypestbl where v in ("bee", "world") sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v in ("orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,11 +249,11 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where v between "bee" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -262,16 +262,16 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "zombie" so
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v between "bee" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -280,16 +280,16 @@ POSTHOOK: query: select * from newtypestbl where v between "bee" and "zombie" so
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where v between "orange" and "pine"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
index aa3b272..980b65b 100644
--- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
@@ -241,7 +241,7 @@ POSTHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_pred
@@ -250,7 +250,7 @@ POSTHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/serde_regex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out
index ad3af57..7bebb0c 100644
--- a/ql/src/test/results/clientpositive/serde_regex.q.out
+++ b/ql/src/test/results/clientpositive/serde_regex.q.out
@@ -201,43 +201,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@serde_regex1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: DROP TABLE serde_regex1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@serde_regex1
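
The serde_regex1 hunk shows the same rule at full stretch: a column of scale 18 now prints all 18 fractional digits, so -1255.49 becomes -1255.490000000000000000. A hedged one-liner (the literal is illustrative):

  -- assumption: any value cast to decimal(38,18) now pads to 18 fractional digits
  SELECT CAST('3.14' AS decimal(38,18));
  -- new output: 3.140000000000000000 (previously 3.14)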

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
index 318be3d..cebc342 100644
--- a/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
+++ b/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
@@ -92,9 +92,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
-19
+19.0
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out b/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
index b2704c6..46fbeb7 100644
--- a/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/decimal_1_1.q.out
@@ -26,9 +26,9 @@ POSTHOOK: query: select * from decimal_1_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_1_1
 #### A masked pattern was here ####
-0
-0
-0
+0.0
+0.0
+0.0
 0.1
 0.2
 0.9
@@ -37,13 +37,13 @@ NULL
 0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.9
@@ -52,10 +52,10 @@ NULL
 -0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
+0.0
 PREHOOK: query: select d from decimal_1_1 order by d desc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_1_1
@@ -69,18 +69,18 @@ POSTHOOK: Input: default@decimal_1_1
 0.3
 0.2
 0.1
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.3
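
Zero is padded as well: the single decimal(1,1) column of decimal_1_1 now prints 0.0 instead of 0, which accounts for every changed line in this file. For example (hypothetical literal):

  -- assumption: zero keeps the declared scale of decimal(1,1)
  SELECT CAST(0 AS decimal(1,1));
  -- new output: 0.0 (previously 0)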

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
index 20b188b..bc785f9 100644
--- a/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
@@ -172,112 +172,112 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -288,109 +288,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
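
Note that only the left column of the join output gains trailing zeros, consistent with t1.dec and t2.dec being declared with different scales (plausibly decimal(4,2) versus decimal(4,0), given two fractional digits on the left and none on the right). A hedged sketch with hypothetical table names:

  -- assumed declarations; the actual q-file setup may differ
  CREATE TABLE t1_demo (dec decimal(4,2)) STORED AS ORC;
  CREATE TABLE t2_demo (dec decimal(4,0)) STORED AS ORC;
  SELECT t1_demo.dec, t2_demo.dec
  FROM t1_demo JOIN t2_demo ON (t1_demo.dec = t2_demo.dec);
  -- each matched row now prints like: 14.00  14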

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index 133769a..f1ff784 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -650,34 +650,34 @@ POSTHOOK: Input: default@decimal_date_test
 -18.5162162162
 -17.3216216216
 -16.7243243243
--16.127027027
+-16.1270270270
 -15.5297297297
 -10.7513513514
 -9.5567567568
 -8.3621621622
--5.972972973
+-5.9729729730
 -3.5837837838
 4.1810810811
 4.7783783784
 4.7783783784
 5.3756756757
-5.972972973
-5.972972973
+5.9729729730
+5.9729729730
 11.3486486486
 11.3486486486
 11.9459459459
 14.9324324324
 19.1135135135
 20.3081081081
-22.1
+22.1000000000
 24.4891891892
 33.4486486486
 34.6432432432
 40.0189189189
 42.4081081081
 43.0054054054
-44.2
-44.2
+44.2000000000
+44.2000000000
 44.7972972973
 45.9918918919
 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
index e1acab1..c3e7779 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
@@ -204,13 +204,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000
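
The vector_cast_constant change applies the rule to an aggregate: AVG over a decimal widens the result scale by four digits, so averaging whole-number decimals now prints 50.0000 where it previously printed 50. A sketch of the pattern (the source table is hypothetical):

  -- assumption: AVG over decimal(10,0) yields scale 4, hence the 50.0000 above
  SELECT AVG(CAST(50 AS decimal)) FROM some_rows_demo;
  -- new output: 50.0000 (previously 50)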

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
index f6b2920..bcabc98 100644
--- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
@@ -159,7 +159,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education
@@ -252,7 +252,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education


[40/55] [abbrv] hive git commit: Revert inadvertent addition of HiveConf.java.orig file

Posted by jx...@apache.org.
Revert inadvertent addition of HiveConf.java.orig file


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/37f05f41
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/37f05f41
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/37f05f41

Branch: refs/heads/master-fixed
Commit: 37f05f410c5243ac6935feee267069cd246c9b38
Parents: 11f5d44
Author: Matt McCline <mm...@hortonworks.com>
Authored: Wed Nov 4 14:18:03 2015 -0800
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Wed Nov 4 14:18:03 2015 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hive/conf/HiveConf.java.orig  | 3372 ------------------
 1 file changed, 3372 deletions(-)
----------------------------------------------------------------------



[06/55] [abbrv] hive git commit: HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)

Posted by jx...@apache.org.
HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/55a24f0a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/55a24f0a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/55a24f0a

Branch: refs/heads/master-fixed
Commit: 55a24f0a0da2e984cb59ab513b4d7b9cb7c3b2d8
Parents: f4bac6a
Author: Aihua Xu <ai...@gmail.com>
Authored: Mon Nov 2 09:21:38 2015 -0800
Committer: Chao Sun <su...@apache.org>
Committed: Mon Nov 2 09:21:38 2015 -0800

----------------------------------------------------------------------
 metastore/if/hive_metastore.thrift              |    5 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 1752 +++++---
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  186 +
 .../ThriftHiveMetastore_server.skeleton.cpp     |    5 +
 .../hive/metastore/api/ThriftHiveMetastore.java | 3987 +++++++++++++-----
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1144 +++--
 .../hive_metastore/ThriftHiveMetastore-remote   |    7 +
 .../hive_metastore/ThriftHiveMetastore.py       |  790 +++-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   84 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   13 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   16 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   16 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   13 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   31 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |    5 +
 .../clientnegative/exchange_partition.q.out     |    2 +
 .../clientpositive/exchange_partition.q.out     |    6 +
 .../clientpositive/exchange_partition2.q.out    |    6 +
 .../clientpositive/exchange_partition3.q.out    |    8 +
 .../clientpositive/exchgpartition2lel.q.out     |   18 +
 20 files changed, 5902 insertions(+), 2192 deletions(-)
----------------------------------------------------------------------
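
The practical effect of this change is that ALTER TABLE ... EXCHANGE
PARTITION now populates the inputs/outputs entity sets handed to pre/post
execute hooks -- the same sets behind the PREHOOK:/POSTHOOK: Input/Output
lines in the .q.out files listed above. A minimal sketch of a hook that
logs those entities, assuming the ExecuteWithHookContext interface and a
hypothetical hook class name:

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Hypothetical hook; register it via hive.exec.post.hooks.
public class LogEntitiesHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    for (ReadEntity in : hookContext.getInputs()) {
      System.out.println("input:  " + in.getName());
    }
    // Before this fix, exchange-partition queries left this set empty.
    for (WriteEntity out : hookContext.getOutputs()) {
      System.out.println("output: " + out.getName());
    }
  }
}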


http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 3e30f56..98fd42b 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -988,6 +988,11 @@ service ThriftHiveMetastore extends fb303.FacebookService
       throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
       4:InvalidInputException o4)
 
+  list<Partition> exchange_partitions(1:map<string, string> partitionSpecs, 2:string source_db,
+      3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
+      throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
+      4:InvalidInputException o4)
+
   Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
       4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
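
On the client side, the diffstat above shows matching additions to
IMetaStoreClient and HiveMetaStoreClient. A minimal sketch of driving the
new call from Java, assuming the client method mirrors the Thrift
signature above; the database/table names and the partition spec are
hypothetical:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class ExchangePartitionsDemo {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    // Partition spec: partition column name -> value.
    Map<String, String> spec = new HashMap<String, String>();
    spec.put("ds", "2015-11-06");
    // Moves the matching partitions from the source table to the
    // destination table and returns the exchanged Partition objects.
    List<Partition> moved = client.exchange_partitions(
        spec, "src_db", "src_tbl", "dest_db", "dest_tbl");
    System.out.println("exchanged " + moved.size() + " partition(s)");
    client.close();
  }
}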
 


[47/55] [abbrv] hive git commit: HIVE-12230 custom UDF configure() not called in Vectorization mode (Matt McCline, reviewed by Jason Dere)

Posted by jx...@apache.org.
HIVE-12230 custom UDF configure() not called in Vectorization mode (Matt McCline, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9dae39cc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9dae39cc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9dae39cc

Branch: refs/heads/master-fixed
Commit: 9dae39cc78b68058b06e90b6065046d1fa9b001d
Parents: 81de857
Author: Matt McCline <mm...@hortonworks.com>
Authored: Thu Nov 5 13:16:14 2015 -0800
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Thu Nov 5 13:16:14 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/MapredContext.java      |  2 +-
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |  5 ++
 .../hive/ql/exec/vector/UDFHelloTest.java       | 69 +++++++++++++++++++
 .../vector_custom_udf_configure.q               | 11 +++
 .../vector_custom_udf_configure.q.out           | 70 ++++++++++++++++++++
 5 files changed, 156 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9dae39cc/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
index 6ce84ac..b7ed0c1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
@@ -116,7 +116,7 @@ public class MapredContext {
     udfs.clear();
   }
 
-  void setup(GenericUDF genericUDF) {
+  public void setup(GenericUDF genericUDF) {
     if (needConfigure(genericUDF)) {
       genericUDF.configure(this);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9dae39cc/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index b397398..d3a0f9f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -21,6 +21,7 @@ import java.sql.Date;
 import java.sql.Timestamp;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.vector.*;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
@@ -84,6 +85,10 @@ public class VectorUDFAdaptor extends VectorExpression {
     for (int i = 0; i < childrenOIs.length; i++) {
       childrenOIs[i] = writers[i].getObjectInspector();
     }
+    MapredContext context = MapredContext.get();
+    if (context != null) {
+      context.setup(genericUDF);
+    }
     outputOI = VectorExpressionWriterFactory.genVectorExpressionWritable(expr)
         .getObjectInspector();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9dae39cc/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java
new file mode 100644
index 0000000..48fb59a
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/UDFHelloTest.java
@@ -0,0 +1,69 @@
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import org.apache.hadoop.hive.ql.exec.MapredContext;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * UDF that prepends "Hello " to its input, used to verify that configure() runs
+ */
+public class UDFHelloTest extends GenericUDF {
+  private static final Logger LOG = LoggerFactory.getLogger(UDFHelloTest.class);
+
+  private Text result = new Text();
+
+  private static String greeting = "";
+
+  private ObjectInspectorConverters.Converter[] converters;
+
+  @Override
+  public Object evaluate(DeferredObject[] arg0) throws HiveException {
+
+    if (arg0.length != 1) {
+      LOG.error("UDFHelloTest expects exactly 1 argument");
+      throw new HiveException("UDFHelloTest expects exactly 1 argument");
+    }
+
+    if (arg0[0].get() == null) {
+      LOG.warn("Empty input");
+      return null;
+    }
+
+    Text data = (Text) converters[0].convert(arg0[0].get());
+
+    String dataString = data.toString();
+
+    result.set(greeting + dataString);
+
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] arg0) {
+    return "Hello...";
+  }
+
+  @Override
+  public void configure(MapredContext context) {
+    greeting = "Hello ";
+  }
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arg0) throws UDFArgumentException {
+    converters = new ObjectInspectorConverters.Converter[arg0.length];
+    for (int i = 0; i < arg0.length; i++) {
+      converters[i] = ObjectInspectorConverters.getConverter(arg0[i],
+              PrimitiveObjectInspectorFactory.writableStringObjectInspector);
+    }
+
+    // evaluate will return a Text object
+    return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9dae39cc/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q b/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q
new file mode 100644
index 0000000..eb19f3a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_custom_udf_configure.q
@@ -0,0 +1,11 @@
+set hive.fetch.task.conversion=none;
+
+create temporary function UDFHelloTest as 'org.apache.hadoop.hive.ql.exec.vector.UDFHelloTest';
+
+create table testorc1(id int, name string) stored as orc;
+insert into table testorc1 values(1, 'a1'), (2,'a2');
+ 
+set hive.vectorized.execution.enabled=true;
+explain
+select id, UDFHelloTest(name) from testorc1;
+select id, UDFHelloTest(name) from testorc1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/9dae39cc/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out b/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out
new file mode 100644
index 0000000..d529873
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_custom_udf_configure.q.out
@@ -0,0 +1,70 @@
+PREHOOK: query: create temporary function UDFHelloTest as 'org.apache.hadoop.hive.ql.exec.vector.UDFHelloTest'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: udfhellotest
+POSTHOOK: query: create temporary function UDFHelloTest as 'org.apache.hadoop.hive.ql.exec.vector.UDFHelloTest'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: udfhellotest
+PREHOOK: query: create table testorc1(id int, name string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testorc1
+POSTHOOK: query: create table testorc1(id int, name string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testorc1
+PREHOOK: query: insert into table testorc1 values(1, 'a1'), (2,'a2')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@testorc1
+POSTHOOK: query: insert into table testorc1 values(1, 'a1'), (2,'a2')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@testorc1
+POSTHOOK: Lineage: testorc1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: testorc1.name SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+select id, UDFHelloTest(name) from testorc1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select id, UDFHelloTest(name) from testorc1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: testorc1
+            Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: id (type: int), Hello... (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select id, UDFHelloTest(name) from testorc1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc1
+#### A masked pattern was here ####
+POSTHOOK: query: select id, UDFHelloTest(name) from testorc1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc1
+#### A masked pattern was here ####
+1	Hello a1
+2	Hello a2


[05/55] [abbrv] hive git commit: HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index a82c363..6a80db7 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -11672,6 +11672,402 @@ uint32_t ThriftHiveMetastore_exchange_partition_presult::read(::apache::thrift::
 }
 
 
+ThriftHiveMetastore_exchange_partitions_args::~ThriftHiveMetastore_exchange_partitions_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_MAP) {
+          {
+            this->partitionSpecs.clear();
+            uint32_t _size904;
+            ::apache::thrift::protocol::TType _ktype905;
+            ::apache::thrift::protocol::TType _vtype906;
+            xfer += iprot->readMapBegin(_ktype905, _vtype906, _size904);
+            uint32_t _i908;
+            for (_i908 = 0; _i908 < _size904; ++_i908)
+            {
+              std::string _key909;
+              xfer += iprot->readString(_key909);
+              std::string& _val910 = this->partitionSpecs[_key909];
+              xfer += iprot->readString(_val910);
+            }
+            xfer += iprot->readMapEnd();
+          }
+          this->__isset.partitionSpecs = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->source_db);
+          this->__isset.source_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->source_table_name);
+          this->__isset.source_table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dest_db);
+          this->__isset.dest_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dest_table_name);
+          this->__isset.dest_table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partitions_args");
+
+  xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
+  {
+    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
+    std::map<std::string, std::string> ::const_iterator _iter911;
+    for (_iter911 = this->partitionSpecs.begin(); _iter911 != this->partitionSpecs.end(); ++_iter911)
+    {
+      xfer += oprot->writeString(_iter911->first);
+      xfer += oprot->writeString(_iter911->second);
+    }
+    xfer += oprot->writeMapEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("source_db", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->source_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("source_table_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->source_table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dest_db", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString(this->dest_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dest_table_name", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->dest_table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_exchange_partitions_pargs::~ThriftHiveMetastore_exchange_partitions_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partitions_pargs");
+
+  xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
+  {
+    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
+    std::map<std::string, std::string> ::const_iterator _iter912;
+    for (_iter912 = (*(this->partitionSpecs)).begin(); _iter912 != (*(this->partitionSpecs)).end(); ++_iter912)
+    {
+      xfer += oprot->writeString(_iter912->first);
+      xfer += oprot->writeString(_iter912->second);
+    }
+    xfer += oprot->writeMapEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("source_db", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->source_db)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("source_table_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString((*(this->source_table_name)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dest_db", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString((*(this->dest_db)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dest_table_name", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString((*(this->dest_table_name)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_exchange_partitions_result::~ThriftHiveMetastore_exchange_partitions_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size913;
+            ::apache::thrift::protocol::TType _etype916;
+            xfer += iprot->readListBegin(_etype916, _size913);
+            this->success.resize(_size913);
+            uint32_t _i917;
+            for (_i917 = 0; _i917 < _size913; ++_i917)
+            {
+              xfer += this->success[_i917].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o4.read(iprot);
+          this->__isset.o4 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partitions_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+      std::vector<Partition> ::const_iterator _iter918;
+      for (_iter918 = this->success.begin(); _iter918 != this->success.end(); ++_iter918)
+      {
+        xfer += (*_iter918).write(oprot);
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o4) {
+    xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4);
+    xfer += this->o4.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_exchange_partitions_presult::~ThriftHiveMetastore_exchange_partitions_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size919;
+            ::apache::thrift::protocol::TType _etype922;
+            xfer += iprot->readListBegin(_etype922, _size919);
+            (*(this->success)).resize(_size919);
+            uint32_t _i923;
+            for (_i923 = 0; _i923 < _size919; ++_i923)
+            {
+              xfer += (*(this->success))[_i923].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o4.read(iprot);
+          this->__isset.o4 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
 ThriftHiveMetastore_get_partition_with_auth_args::~ThriftHiveMetastore_get_partition_with_auth_args() throw() {
 }
 
@@ -11717,14 +12113,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size904;
-            ::apache::thrift::protocol::TType _etype907;
-            xfer += iprot->readListBegin(_etype907, _size904);
-            this->part_vals.resize(_size904);
-            uint32_t _i908;
-            for (_i908 = 0; _i908 < _size904; ++_i908)
+            uint32_t _size924;
+            ::apache::thrift::protocol::TType _etype927;
+            xfer += iprot->readListBegin(_etype927, _size924);
+            this->part_vals.resize(_size924);
+            uint32_t _i928;
+            for (_i928 = 0; _i928 < _size924; ++_i928)
             {
-              xfer += iprot->readString(this->part_vals[_i908]);
+              xfer += iprot->readString(this->part_vals[_i928]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11745,14 +12141,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size909;
-            ::apache::thrift::protocol::TType _etype912;
-            xfer += iprot->readListBegin(_etype912, _size909);
-            this->group_names.resize(_size909);
-            uint32_t _i913;
-            for (_i913 = 0; _i913 < _size909; ++_i913)
+            uint32_t _size929;
+            ::apache::thrift::protocol::TType _etype932;
+            xfer += iprot->readListBegin(_etype932, _size929);
+            this->group_names.resize(_size929);
+            uint32_t _i933;
+            for (_i933 = 0; _i933 < _size929; ++_i933)
             {
-              xfer += iprot->readString(this->group_names[_i913]);
+              xfer += iprot->readString(this->group_names[_i933]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11789,10 +12185,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter914;
-    for (_iter914 = this->part_vals.begin(); _iter914 != this->part_vals.end(); ++_iter914)
+    std::vector<std::string> ::const_iterator _iter934;
+    for (_iter934 = this->part_vals.begin(); _iter934 != this->part_vals.end(); ++_iter934)
     {
-      xfer += oprot->writeString((*_iter914));
+      xfer += oprot->writeString((*_iter934));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11805,10 +12201,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter915;
-    for (_iter915 = this->group_names.begin(); _iter915 != this->group_names.end(); ++_iter915)
+    std::vector<std::string> ::const_iterator _iter935;
+    for (_iter935 = this->group_names.begin(); _iter935 != this->group_names.end(); ++_iter935)
     {
-      xfer += oprot->writeString((*_iter915));
+      xfer += oprot->writeString((*_iter935));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11840,10 +12236,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter916;
-    for (_iter916 = (*(this->part_vals)).begin(); _iter916 != (*(this->part_vals)).end(); ++_iter916)
+    std::vector<std::string> ::const_iterator _iter936;
+    for (_iter936 = (*(this->part_vals)).begin(); _iter936 != (*(this->part_vals)).end(); ++_iter936)
     {
-      xfer += oprot->writeString((*_iter916));
+      xfer += oprot->writeString((*_iter936));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11856,10 +12252,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter917;
-    for (_iter917 = (*(this->group_names)).begin(); _iter917 != (*(this->group_names)).end(); ++_iter917)
+    std::vector<std::string> ::const_iterator _iter937;
+    for (_iter937 = (*(this->group_names)).begin(); _iter937 != (*(this->group_names)).end(); ++_iter937)
     {
-      xfer += oprot->writeString((*_iter917));
+      xfer += oprot->writeString((*_iter937));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12418,14 +12814,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size918;
-            ::apache::thrift::protocol::TType _etype921;
-            xfer += iprot->readListBegin(_etype921, _size918);
-            this->success.resize(_size918);
-            uint32_t _i922;
-            for (_i922 = 0; _i922 < _size918; ++_i922)
+            uint32_t _size938;
+            ::apache::thrift::protocol::TType _etype941;
+            xfer += iprot->readListBegin(_etype941, _size938);
+            this->success.resize(_size938);
+            uint32_t _i942;
+            for (_i942 = 0; _i942 < _size938; ++_i942)
             {
-              xfer += this->success[_i922].read(iprot);
+              xfer += this->success[_i942].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -12472,10 +12868,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter923;
-      for (_iter923 = this->success.begin(); _iter923 != this->success.end(); ++_iter923)
+      std::vector<Partition> ::const_iterator _iter943;
+      for (_iter943 = this->success.begin(); _iter943 != this->success.end(); ++_iter943)
       {
-        xfer += (*_iter923).write(oprot);
+        xfer += (*_iter943).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -12524,14 +12920,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size924;
-            ::apache::thrift::protocol::TType _etype927;
-            xfer += iprot->readListBegin(_etype927, _size924);
-            (*(this->success)).resize(_size924);
-            uint32_t _i928;
-            for (_i928 = 0; _i928 < _size924; ++_i928)
+            uint32_t _size944;
+            ::apache::thrift::protocol::TType _etype947;
+            xfer += iprot->readListBegin(_etype947, _size944);
+            (*(this->success)).resize(_size944);
+            uint32_t _i948;
+            for (_i948 = 0; _i948 < _size944; ++_i948)
             {
-              xfer += (*(this->success))[_i928].read(iprot);
+              xfer += (*(this->success))[_i948].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -12630,14 +13026,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size929;
-            ::apache::thrift::protocol::TType _etype932;
-            xfer += iprot->readListBegin(_etype932, _size929);
-            this->group_names.resize(_size929);
-            uint32_t _i933;
-            for (_i933 = 0; _i933 < _size929; ++_i933)
+            uint32_t _size949;
+            ::apache::thrift::protocol::TType _etype952;
+            xfer += iprot->readListBegin(_etype952, _size949);
+            this->group_names.resize(_size949);
+            uint32_t _i953;
+            for (_i953 = 0; _i953 < _size949; ++_i953)
             {
-              xfer += iprot->readString(this->group_names[_i933]);
+              xfer += iprot->readString(this->group_names[_i953]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12682,10 +13078,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter934;
-    for (_iter934 = this->group_names.begin(); _iter934 != this->group_names.end(); ++_iter934)
+    std::vector<std::string> ::const_iterator _iter954;
+    for (_iter954 = this->group_names.begin(); _iter954 != this->group_names.end(); ++_iter954)
     {
-      xfer += oprot->writeString((*_iter934));
+      xfer += oprot->writeString((*_iter954));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12725,10 +13121,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter935;
-    for (_iter935 = (*(this->group_names)).begin(); _iter935 != (*(this->group_names)).end(); ++_iter935)
+    std::vector<std::string> ::const_iterator _iter955;
+    for (_iter955 = (*(this->group_names)).begin(); _iter955 != (*(this->group_names)).end(); ++_iter955)
     {
-      xfer += oprot->writeString((*_iter935));
+      xfer += oprot->writeString((*_iter955));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12769,14 +13165,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size936;
-            ::apache::thrift::protocol::TType _etype939;
-            xfer += iprot->readListBegin(_etype939, _size936);
-            this->success.resize(_size936);
-            uint32_t _i940;
-            for (_i940 = 0; _i940 < _size936; ++_i940)
+            uint32_t _size956;
+            ::apache::thrift::protocol::TType _etype959;
+            xfer += iprot->readListBegin(_etype959, _size956);
+            this->success.resize(_size956);
+            uint32_t _i960;
+            for (_i960 = 0; _i960 < _size956; ++_i960)
             {
-              xfer += this->success[_i940].read(iprot);
+              xfer += this->success[_i960].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -12823,10 +13219,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter941;
-      for (_iter941 = this->success.begin(); _iter941 != this->success.end(); ++_iter941)
+      std::vector<Partition> ::const_iterator _iter961;
+      for (_iter961 = this->success.begin(); _iter961 != this->success.end(); ++_iter961)
       {
-        xfer += (*_iter941).write(oprot);
+        xfer += (*_iter961).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -12875,14 +13271,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size942;
-            ::apache::thrift::protocol::TType _etype945;
-            xfer += iprot->readListBegin(_etype945, _size942);
-            (*(this->success)).resize(_size942);
-            uint32_t _i946;
-            for (_i946 = 0; _i946 < _size942; ++_i946)
+            uint32_t _size962;
+            ::apache::thrift::protocol::TType _etype965;
+            xfer += iprot->readListBegin(_etype965, _size962);
+            (*(this->success)).resize(_size962);
+            uint32_t _i966;
+            for (_i966 = 0; _i966 < _size962; ++_i966)
             {
-              xfer += (*(this->success))[_i946].read(iprot);
+              xfer += (*(this->success))[_i966].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13060,14 +13456,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size947;
-            ::apache::thrift::protocol::TType _etype950;
-            xfer += iprot->readListBegin(_etype950, _size947);
-            this->success.resize(_size947);
-            uint32_t _i951;
-            for (_i951 = 0; _i951 < _size947; ++_i951)
+            uint32_t _size967;
+            ::apache::thrift::protocol::TType _etype970;
+            xfer += iprot->readListBegin(_etype970, _size967);
+            this->success.resize(_size967);
+            uint32_t _i971;
+            for (_i971 = 0; _i971 < _size967; ++_i971)
             {
-              xfer += this->success[_i951].read(iprot);
+              xfer += this->success[_i971].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13114,10 +13510,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<PartitionSpec> ::const_iterator _iter952;
-      for (_iter952 = this->success.begin(); _iter952 != this->success.end(); ++_iter952)
+      std::vector<PartitionSpec> ::const_iterator _iter972;
+      for (_iter972 = this->success.begin(); _iter972 != this->success.end(); ++_iter972)
       {
-        xfer += (*_iter952).write(oprot);
+        xfer += (*_iter972).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -13166,14 +13562,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size953;
-            ::apache::thrift::protocol::TType _etype956;
-            xfer += iprot->readListBegin(_etype956, _size953);
-            (*(this->success)).resize(_size953);
-            uint32_t _i957;
-            for (_i957 = 0; _i957 < _size953; ++_i957)
+            uint32_t _size973;
+            ::apache::thrift::protocol::TType _etype976;
+            xfer += iprot->readListBegin(_etype976, _size973);
+            (*(this->success)).resize(_size973);
+            uint32_t _i977;
+            for (_i977 = 0; _i977 < _size973; ++_i977)
             {
-              xfer += (*(this->success))[_i957].read(iprot);
+              xfer += (*(this->success))[_i977].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13351,14 +13747,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size958;
-            ::apache::thrift::protocol::TType _etype961;
-            xfer += iprot->readListBegin(_etype961, _size958);
-            this->success.resize(_size958);
-            uint32_t _i962;
-            for (_i962 = 0; _i962 < _size958; ++_i962)
+            uint32_t _size978;
+            ::apache::thrift::protocol::TType _etype981;
+            xfer += iprot->readListBegin(_etype981, _size978);
+            this->success.resize(_size978);
+            uint32_t _i982;
+            for (_i982 = 0; _i982 < _size978; ++_i982)
             {
-              xfer += iprot->readString(this->success[_i962]);
+              xfer += iprot->readString(this->success[_i982]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13397,10 +13793,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter963;
-      for (_iter963 = this->success.begin(); _iter963 != this->success.end(); ++_iter963)
+      std::vector<std::string> ::const_iterator _iter983;
+      for (_iter983 = this->success.begin(); _iter983 != this->success.end(); ++_iter983)
       {
-        xfer += oprot->writeString((*_iter963));
+        xfer += oprot->writeString((*_iter983));
       }
       xfer += oprot->writeListEnd();
     }
@@ -13445,14 +13841,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size964;
-            ::apache::thrift::protocol::TType _etype967;
-            xfer += iprot->readListBegin(_etype967, _size964);
-            (*(this->success)).resize(_size964);
-            uint32_t _i968;
-            for (_i968 = 0; _i968 < _size964; ++_i968)
+            uint32_t _size984;
+            ::apache::thrift::protocol::TType _etype987;
+            xfer += iprot->readListBegin(_etype987, _size984);
+            (*(this->success)).resize(_size984);
+            uint32_t _i988;
+            for (_i988 = 0; _i988 < _size984; ++_i988)
             {
-              xfer += iprot->readString((*(this->success))[_i968]);
+              xfer += iprot->readString((*(this->success))[_i988]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13527,14 +13923,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size969;
-            ::apache::thrift::protocol::TType _etype972;
-            xfer += iprot->readListBegin(_etype972, _size969);
-            this->part_vals.resize(_size969);
-            uint32_t _i973;
-            for (_i973 = 0; _i973 < _size969; ++_i973)
+            uint32_t _size989;
+            ::apache::thrift::protocol::TType _etype992;
+            xfer += iprot->readListBegin(_etype992, _size989);
+            this->part_vals.resize(_size989);
+            uint32_t _i993;
+            for (_i993 = 0; _i993 < _size989; ++_i993)
             {
-              xfer += iprot->readString(this->part_vals[_i973]);
+              xfer += iprot->readString(this->part_vals[_i993]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13579,10 +13975,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter974;
-    for (_iter974 = this->part_vals.begin(); _iter974 != this->part_vals.end(); ++_iter974)
+    std::vector<std::string> ::const_iterator _iter994;
+    for (_iter994 = this->part_vals.begin(); _iter994 != this->part_vals.end(); ++_iter994)
     {
-      xfer += oprot->writeString((*_iter974));
+      xfer += oprot->writeString((*_iter994));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13618,10 +14014,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter975;
-    for (_iter975 = (*(this->part_vals)).begin(); _iter975 != (*(this->part_vals)).end(); ++_iter975)
+    std::vector<std::string> ::const_iterator _iter995;
+    for (_iter995 = (*(this->part_vals)).begin(); _iter995 != (*(this->part_vals)).end(); ++_iter995)
     {
-      xfer += oprot->writeString((*_iter975));
+      xfer += oprot->writeString((*_iter995));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13666,14 +14062,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size976;
-            ::apache::thrift::protocol::TType _etype979;
-            xfer += iprot->readListBegin(_etype979, _size976);
-            this->success.resize(_size976);
-            uint32_t _i980;
-            for (_i980 = 0; _i980 < _size976; ++_i980)
+            uint32_t _size996;
+            ::apache::thrift::protocol::TType _etype999;
+            xfer += iprot->readListBegin(_etype999, _size996);
+            this->success.resize(_size996);
+            uint32_t _i1000;
+            for (_i1000 = 0; _i1000 < _size996; ++_i1000)
             {
-              xfer += this->success[_i980].read(iprot);
+              xfer += this->success[_i1000].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13720,10 +14116,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter981;
-      for (_iter981 = this->success.begin(); _iter981 != this->success.end(); ++_iter981)
+      std::vector<Partition> ::const_iterator _iter1001;
+      for (_iter1001 = this->success.begin(); _iter1001 != this->success.end(); ++_iter1001)
       {
-        xfer += (*_iter981).write(oprot);
+        xfer += (*_iter1001).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -13772,14 +14168,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size982;
-            ::apache::thrift::protocol::TType _etype985;
-            xfer += iprot->readListBegin(_etype985, _size982);
-            (*(this->success)).resize(_size982);
-            uint32_t _i986;
-            for (_i986 = 0; _i986 < _size982; ++_i986)
+            uint32_t _size1002;
+            ::apache::thrift::protocol::TType _etype1005;
+            xfer += iprot->readListBegin(_etype1005, _size1002);
+            (*(this->success)).resize(_size1002);
+            uint32_t _i1006;
+            for (_i1006 = 0; _i1006 < _size1002; ++_i1006)
             {
-              xfer += (*(this->success))[_i986].read(iprot);
+              xfer += (*(this->success))[_i1006].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13862,14 +14258,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size987;
-            ::apache::thrift::protocol::TType _etype990;
-            xfer += iprot->readListBegin(_etype990, _size987);
-            this->part_vals.resize(_size987);
-            uint32_t _i991;
-            for (_i991 = 0; _i991 < _size987; ++_i991)
+            uint32_t _size1007;
+            ::apache::thrift::protocol::TType _etype1010;
+            xfer += iprot->readListBegin(_etype1010, _size1007);
+            this->part_vals.resize(_size1007);
+            uint32_t _i1011;
+            for (_i1011 = 0; _i1011 < _size1007; ++_i1011)
             {
-              xfer += iprot->readString(this->part_vals[_i991]);
+              xfer += iprot->readString(this->part_vals[_i1011]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13898,14 +14294,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size992;
-            ::apache::thrift::protocol::TType _etype995;
-            xfer += iprot->readListBegin(_etype995, _size992);
-            this->group_names.resize(_size992);
-            uint32_t _i996;
-            for (_i996 = 0; _i996 < _size992; ++_i996)
+            uint32_t _size1012;
+            ::apache::thrift::protocol::TType _etype1015;
+            xfer += iprot->readListBegin(_etype1015, _size1012);
+            this->group_names.resize(_size1012);
+            uint32_t _i1016;
+            for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
             {
-              xfer += iprot->readString(this->group_names[_i996]);
+              xfer += iprot->readString(this->group_names[_i1016]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13942,10 +14338,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter997;
-    for (_iter997 = this->part_vals.begin(); _iter997 != this->part_vals.end(); ++_iter997)
+    std::vector<std::string> ::const_iterator _iter1017;
+    for (_iter1017 = this->part_vals.begin(); _iter1017 != this->part_vals.end(); ++_iter1017)
     {
-      xfer += oprot->writeString((*_iter997));
+      xfer += oprot->writeString((*_iter1017));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13962,10 +14358,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter998;
-    for (_iter998 = this->group_names.begin(); _iter998 != this->group_names.end(); ++_iter998)
+    std::vector<std::string> ::const_iterator _iter1018;
+    for (_iter1018 = this->group_names.begin(); _iter1018 != this->group_names.end(); ++_iter1018)
     {
-      xfer += oprot->writeString((*_iter998));
+      xfer += oprot->writeString((*_iter1018));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13997,10 +14393,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter999;
-    for (_iter999 = (*(this->part_vals)).begin(); _iter999 != (*(this->part_vals)).end(); ++_iter999)
+    std::vector<std::string> ::const_iterator _iter1019;
+    for (_iter1019 = (*(this->part_vals)).begin(); _iter1019 != (*(this->part_vals)).end(); ++_iter1019)
     {
-      xfer += oprot->writeString((*_iter999));
+      xfer += oprot->writeString((*_iter1019));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14017,10 +14413,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1000;
-    for (_iter1000 = (*(this->group_names)).begin(); _iter1000 != (*(this->group_names)).end(); ++_iter1000)
+    std::vector<std::string> ::const_iterator _iter1020;
+    for (_iter1020 = (*(this->group_names)).begin(); _iter1020 != (*(this->group_names)).end(); ++_iter1020)
     {
-      xfer += oprot->writeString((*_iter1000));
+      xfer += oprot->writeString((*_iter1020));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14061,14 +14457,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1001;
-            ::apache::thrift::protocol::TType _etype1004;
-            xfer += iprot->readListBegin(_etype1004, _size1001);
-            this->success.resize(_size1001);
-            uint32_t _i1005;
-            for (_i1005 = 0; _i1005 < _size1001; ++_i1005)
+            uint32_t _size1021;
+            ::apache::thrift::protocol::TType _etype1024;
+            xfer += iprot->readListBegin(_etype1024, _size1021);
+            this->success.resize(_size1021);
+            uint32_t _i1025;
+            for (_i1025 = 0; _i1025 < _size1021; ++_i1025)
             {
-              xfer += this->success[_i1005].read(iprot);
+              xfer += this->success[_i1025].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14115,10 +14511,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1006;
-      for (_iter1006 = this->success.begin(); _iter1006 != this->success.end(); ++_iter1006)
+      std::vector<Partition> ::const_iterator _iter1026;
+      for (_iter1026 = this->success.begin(); _iter1026 != this->success.end(); ++_iter1026)
       {
-        xfer += (*_iter1006).write(oprot);
+        xfer += (*_iter1026).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -14167,14 +14563,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1007;
-            ::apache::thrift::protocol::TType _etype1010;
-            xfer += iprot->readListBegin(_etype1010, _size1007);
-            (*(this->success)).resize(_size1007);
-            uint32_t _i1011;
-            for (_i1011 = 0; _i1011 < _size1007; ++_i1011)
+            uint32_t _size1027;
+            ::apache::thrift::protocol::TType _etype1030;
+            xfer += iprot->readListBegin(_etype1030, _size1027);
+            (*(this->success)).resize(_size1027);
+            uint32_t _i1031;
+            for (_i1031 = 0; _i1031 < _size1027; ++_i1031)
             {
-              xfer += (*(this->success))[_i1011].read(iprot);
+              xfer += (*(this->success))[_i1031].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14257,14 +14653,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1012;
-            ::apache::thrift::protocol::TType _etype1015;
-            xfer += iprot->readListBegin(_etype1015, _size1012);
-            this->part_vals.resize(_size1012);
-            uint32_t _i1016;
-            for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
+            uint32_t _size1032;
+            ::apache::thrift::protocol::TType _etype1035;
+            xfer += iprot->readListBegin(_etype1035, _size1032);
+            this->part_vals.resize(_size1032);
+            uint32_t _i1036;
+            for (_i1036 = 0; _i1036 < _size1032; ++_i1036)
             {
-              xfer += iprot->readString(this->part_vals[_i1016]);
+              xfer += iprot->readString(this->part_vals[_i1036]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14309,10 +14705,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1017;
-    for (_iter1017 = this->part_vals.begin(); _iter1017 != this->part_vals.end(); ++_iter1017)
+    std::vector<std::string> ::const_iterator _iter1037;
+    for (_iter1037 = this->part_vals.begin(); _iter1037 != this->part_vals.end(); ++_iter1037)
     {
-      xfer += oprot->writeString((*_iter1017));
+      xfer += oprot->writeString((*_iter1037));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14348,10 +14744,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1018;
-    for (_iter1018 = (*(this->part_vals)).begin(); _iter1018 != (*(this->part_vals)).end(); ++_iter1018)
+    std::vector<std::string> ::const_iterator _iter1038;
+    for (_iter1038 = (*(this->part_vals)).begin(); _iter1038 != (*(this->part_vals)).end(); ++_iter1038)
     {
-      xfer += oprot->writeString((*_iter1018));
+      xfer += oprot->writeString((*_iter1038));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14396,14 +14792,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1019;
-            ::apache::thrift::protocol::TType _etype1022;
-            xfer += iprot->readListBegin(_etype1022, _size1019);
-            this->success.resize(_size1019);
-            uint32_t _i1023;
-            for (_i1023 = 0; _i1023 < _size1019; ++_i1023)
+            uint32_t _size1039;
+            ::apache::thrift::protocol::TType _etype1042;
+            xfer += iprot->readListBegin(_etype1042, _size1039);
+            this->success.resize(_size1039);
+            uint32_t _i1043;
+            for (_i1043 = 0; _i1043 < _size1039; ++_i1043)
             {
-              xfer += iprot->readString(this->success[_i1023]);
+              xfer += iprot->readString(this->success[_i1043]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14450,10 +14846,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1024;
-      for (_iter1024 = this->success.begin(); _iter1024 != this->success.end(); ++_iter1024)
+      std::vector<std::string> ::const_iterator _iter1044;
+      for (_iter1044 = this->success.begin(); _iter1044 != this->success.end(); ++_iter1044)
       {
-        xfer += oprot->writeString((*_iter1024));
+        xfer += oprot->writeString((*_iter1044));
       }
       xfer += oprot->writeListEnd();
     }
@@ -14502,14 +14898,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1025;
-            ::apache::thrift::protocol::TType _etype1028;
-            xfer += iprot->readListBegin(_etype1028, _size1025);
-            (*(this->success)).resize(_size1025);
-            uint32_t _i1029;
-            for (_i1029 = 0; _i1029 < _size1025; ++_i1029)
+            uint32_t _size1045;
+            ::apache::thrift::protocol::TType _etype1048;
+            xfer += iprot->readListBegin(_etype1048, _size1045);
+            (*(this->success)).resize(_size1045);
+            uint32_t _i1049;
+            for (_i1049 = 0; _i1049 < _size1045; ++_i1049)
             {
-              xfer += iprot->readString((*(this->success))[_i1029]);
+              xfer += iprot->readString((*(this->success))[_i1049]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14703,14 +15099,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1030;
-            ::apache::thrift::protocol::TType _etype1033;
-            xfer += iprot->readListBegin(_etype1033, _size1030);
-            this->success.resize(_size1030);
-            uint32_t _i1034;
-            for (_i1034 = 0; _i1034 < _size1030; ++_i1034)
+            uint32_t _size1050;
+            ::apache::thrift::protocol::TType _etype1053;
+            xfer += iprot->readListBegin(_etype1053, _size1050);
+            this->success.resize(_size1050);
+            uint32_t _i1054;
+            for (_i1054 = 0; _i1054 < _size1050; ++_i1054)
             {
-              xfer += this->success[_i1034].read(iprot);
+              xfer += this->success[_i1054].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14757,10 +15153,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1035;
-      for (_iter1035 = this->success.begin(); _iter1035 != this->success.end(); ++_iter1035)
+      std::vector<Partition> ::const_iterator _iter1055;
+      for (_iter1055 = this->success.begin(); _iter1055 != this->success.end(); ++_iter1055)
       {
-        xfer += (*_iter1035).write(oprot);
+        xfer += (*_iter1055).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -14809,14 +15205,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1036;
-            ::apache::thrift::protocol::TType _etype1039;
-            xfer += iprot->readListBegin(_etype1039, _size1036);
-            (*(this->success)).resize(_size1036);
-            uint32_t _i1040;
-            for (_i1040 = 0; _i1040 < _size1036; ++_i1040)
+            uint32_t _size1056;
+            ::apache::thrift::protocol::TType _etype1059;
+            xfer += iprot->readListBegin(_etype1059, _size1056);
+            (*(this->success)).resize(_size1056);
+            uint32_t _i1060;
+            for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
             {
-              xfer += (*(this->success))[_i1040].read(iprot);
+              xfer += (*(this->success))[_i1060].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15010,14 +15406,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1041;
-            ::apache::thrift::protocol::TType _etype1044;
-            xfer += iprot->readListBegin(_etype1044, _size1041);
-            this->success.resize(_size1041);
-            uint32_t _i1045;
-            for (_i1045 = 0; _i1045 < _size1041; ++_i1045)
+            uint32_t _size1061;
+            ::apache::thrift::protocol::TType _etype1064;
+            xfer += iprot->readListBegin(_etype1064, _size1061);
+            this->success.resize(_size1061);
+            uint32_t _i1065;
+            for (_i1065 = 0; _i1065 < _size1061; ++_i1065)
             {
-              xfer += this->success[_i1045].read(iprot);
+              xfer += this->success[_i1065].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15064,10 +15460,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<PartitionSpec> ::const_iterator _iter1046;
-      for (_iter1046 = this->success.begin(); _iter1046 != this->success.end(); ++_iter1046)
+      std::vector<PartitionSpec> ::const_iterator _iter1066;
+      for (_iter1066 = this->success.begin(); _iter1066 != this->success.end(); ++_iter1066)
       {
-        xfer += (*_iter1046).write(oprot);
+        xfer += (*_iter1066).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -15116,14 +15512,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1047;
-            ::apache::thrift::protocol::TType _etype1050;
-            xfer += iprot->readListBegin(_etype1050, _size1047);
-            (*(this->success)).resize(_size1047);
-            uint32_t _i1051;
-            for (_i1051 = 0; _i1051 < _size1047; ++_i1051)
+            uint32_t _size1067;
+            ::apache::thrift::protocol::TType _etype1070;
+            xfer += iprot->readListBegin(_etype1070, _size1067);
+            (*(this->success)).resize(_size1067);
+            uint32_t _i1071;
+            for (_i1071 = 0; _i1071 < _size1067; ++_i1071)
             {
-              xfer += (*(this->success))[_i1051].read(iprot);
+              xfer += (*(this->success))[_i1071].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15433,14 +15829,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->names.clear();
-            uint32_t _size1052;
-            ::apache::thrift::protocol::TType _etype1055;
-            xfer += iprot->readListBegin(_etype1055, _size1052);
-            this->names.resize(_size1052);
-            uint32_t _i1056;
-            for (_i1056 = 0; _i1056 < _size1052; ++_i1056)
+            uint32_t _size1072;
+            ::apache::thrift::protocol::TType _etype1075;
+            xfer += iprot->readListBegin(_etype1075, _size1072);
+            this->names.resize(_size1072);
+            uint32_t _i1076;
+            for (_i1076 = 0; _i1076 < _size1072; ++_i1076)
             {
-              xfer += iprot->readString(this->names[_i1056]);
+              xfer += iprot->readString(this->names[_i1076]);
             }
             xfer += iprot->readListEnd();
           }
@@ -15477,10 +15873,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size()));
-    std::vector<std::string> ::const_iterator _iter1057;
-    for (_iter1057 = this->names.begin(); _iter1057 != this->names.end(); ++_iter1057)
+    std::vector<std::string> ::const_iterator _iter1077;
+    for (_iter1077 = this->names.begin(); _iter1077 != this->names.end(); ++_iter1077)
     {
-      xfer += oprot->writeString((*_iter1057));
+      xfer += oprot->writeString((*_iter1077));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15512,10 +15908,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->names)).size()));
-    std::vector<std::string> ::const_iterator _iter1058;
-    for (_iter1058 = (*(this->names)).begin(); _iter1058 != (*(this->names)).end(); ++_iter1058)
+    std::vector<std::string> ::const_iterator _iter1078;
+    for (_iter1078 = (*(this->names)).begin(); _iter1078 != (*(this->names)).end(); ++_iter1078)
     {
-      xfer += oprot->writeString((*_iter1058));
+      xfer += oprot->writeString((*_iter1078));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15556,14 +15952,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1059;
-            ::apache::thrift::protocol::TType _etype1062;
-            xfer += iprot->readListBegin(_etype1062, _size1059);
-            this->success.resize(_size1059);
-            uint32_t _i1063;
-            for (_i1063 = 0; _i1063 < _size1059; ++_i1063)
+            uint32_t _size1079;
+            ::apache::thrift::protocol::TType _etype1082;
+            xfer += iprot->readListBegin(_etype1082, _size1079);
+            this->success.resize(_size1079);
+            uint32_t _i1083;
+            for (_i1083 = 0; _i1083 < _size1079; ++_i1083)
             {
-              xfer += this->success[_i1063].read(iprot);
+              xfer += this->success[_i1083].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15610,10 +16006,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1064;
-      for (_iter1064 = this->success.begin(); _iter1064 != this->success.end(); ++_iter1064)
+      std::vector<Partition> ::const_iterator _iter1084;
+      for (_iter1084 = this->success.begin(); _iter1084 != this->success.end(); ++_iter1084)
       {
-        xfer += (*_iter1064).write(oprot);
+        xfer += (*_iter1084).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -15662,14 +16058,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1065;
-            ::apache::thrift::protocol::TType _etype1068;
-            xfer += iprot->readListBegin(_etype1068, _size1065);
-            (*(this->success)).resize(_size1065);
-            uint32_t _i1069;
-            for (_i1069 = 0; _i1069 < _size1065; ++_i1069)
+            uint32_t _size1085;
+            ::apache::thrift::protocol::TType _etype1088;
+            xfer += iprot->readListBegin(_etype1088, _size1085);
+            (*(this->success)).resize(_size1085);
+            uint32_t _i1089;
+            for (_i1089 = 0; _i1089 < _size1085; ++_i1089)
             {
-              xfer += (*(this->success))[_i1069].read(iprot);
+              xfer += (*(this->success))[_i1089].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15991,14 +16387,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1070;
-            ::apache::thrift::protocol::TType _etype1073;
-            xfer += iprot->readListBegin(_etype1073, _size1070);
-            this->new_parts.resize(_size1070);
-            uint32_t _i1074;
-            for (_i1074 = 0; _i1074 < _size1070; ++_i1074)
+            uint32_t _size1090;
+            ::apache::thrift::protocol::TType _etype1093;
+            xfer += iprot->readListBegin(_etype1093, _size1090);
+            this->new_parts.resize(_size1090);
+            uint32_t _i1094;
+            for (_i1094 = 0; _i1094 < _size1090; ++_i1094)
             {
-              xfer += this->new_parts[_i1074].read(iprot);
+              xfer += this->new_parts[_i1094].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -16035,10 +16431,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1075;
-    for (_iter1075 = this->new_parts.begin(); _iter1075 != this->new_parts.end(); ++_iter1075)
+    std::vector<Partition> ::const_iterator _iter1095;
+    for (_iter1095 = this->new_parts.begin(); _iter1095 != this->new_parts.end(); ++_iter1095)
     {
-      xfer += (*_iter1075).write(oprot);
+      xfer += (*_iter1095).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -16070,10 +16466,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1076;
-    for (_iter1076 = (*(this->new_parts)).begin(); _iter1076 != (*(this->new_parts)).end(); ++_iter1076)
+    std::vector<Partition> ::const_iterator _iter1096;
+    for (_iter1096 = (*(this->new_parts)).begin(); _iter1096 != (*(this->new_parts)).end(); ++_iter1096)
     {
-      xfer += (*_iter1076).write(oprot);
+      xfer += (*_iter1096).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -16513,14 +16909,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1077;
-            ::apache::thrift::protocol::TType _etype1080;
-            xfer += iprot->readListBegin(_etype1080, _size1077);
-            this->part_vals.resize(_size1077);
-            uint32_t _i1081;
-            for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
+            uint32_t _size1097;
+            ::apache::thrift::protocol::TType _etype1100;
+            xfer += iprot->readListBegin(_etype1100, _size1097);
+            this->part_vals.resize(_size1097);
+            uint32_t _i1101;
+            for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
             {
-              xfer += iprot->readString(this->part_vals[_i1081]);
+              xfer += iprot->readString(this->part_vals[_i1101]);
             }
             xfer += iprot->readListEnd();
           }
@@ -16565,10 +16961,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1082;
-    for (_iter1082 = this->part_vals.begin(); _iter1082 != this->part_vals.end(); ++_iter1082)
+    std::vector<std::string> ::const_iterator _iter1102;
+    for (_iter1102 = this->part_vals.begin(); _iter1102 != this->part_vals.end(); ++_iter1102)
     {
-      xfer += oprot->writeString((*_iter1082));
+      xfer += oprot->writeString((*_iter1102));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16604,10 +17000,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1083;
-    for (_iter1083 = (*(this->part_vals)).begin(); _iter1083 != (*(this->part_vals)).end(); ++_iter1083)
+    std::vector<std::string> ::const_iterator _iter1103;
+    for (_iter1103 = (*(this->part_vals)).begin(); _iter1103 != (*(this->part_vals)).end(); ++_iter1103)
     {
-      xfer += oprot->writeString((*_iter1083));
+      xfer += oprot->writeString((*_iter1103));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16780,14 +17176,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1084;
-            ::apache::thrift::protocol::TType _etype1087;
-            xfer += iprot->readListBegin(_etype1087, _size1084);
-            this->part_vals.resize(_size1084);
-            uint32_t _i1088;
-            for (_i1088 = 0; _i1088 < _size1084; ++_i1088)
+            uint32_t _size1104;
+            ::apache::thrift::protocol::TType _etype1107;
+            xfer += iprot->readListBegin(_etype1107, _size1104);
+            this->part_vals.resize(_size1104);
+            uint32_t _i1108;
+            for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
             {
-              xfer += iprot->readString(this->part_vals[_i1088]);
+              xfer += iprot->readString(this->part_vals[_i1108]);
             }
             xfer += iprot->readListEnd();
           }
@@ -16824,10 +17220,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1089;
-    for (_iter1089 = this->part_vals.begin(); _iter1089 != this->part_vals.end(); ++_iter1089)
+    std::vector<std::string> ::const_iterator _iter1109;
+    for (_iter1109 = this->part_vals.begin(); _iter1109 != this->part_vals.end(); ++_iter1109)
     {
-      xfer += oprot->writeString((*_iter1089));
+      xfer += oprot->writeString((*_iter1109));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16855,10 +17251,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(::
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1090;
-    for (_iter1090 = (*(this->part_vals)).begin(); _iter1090 != (*(this->part_vals)).end(); ++_iter1090)
+    std::vector<std::string> ::const_iterator _iter1110;
+    for (_iter1110 = (*(this->part_vals)).begin(); _iter1110 != (*(this->part_vals)).end(); ++_iter1110)
     {
-      xfer += oprot->writeString((*_iter1090));
+      xfer += oprot->writeString((*_iter1110));
     }
     xfer += oprot->writeListEnd();
   }
@@ -17333,14 +17729,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1091;
-            ::apache::thrift::protocol::TType _etype1094;
-            xfer += iprot->readListBegin(_etype1094, _size1091);
-            this->success.resize(_size1091);
-            uint32_t _i1095;
-            for (_i1095 = 0; _i1095 < _size1091; ++_i1095)
+            uint32_t _size1111;
+            ::apache::thrift::protocol::TType _etype1114;
+            xfer += iprot->readListBegin(_etype1114, _size1111);
+            this->success.resize(_size1111);
+            uint32_t _i1115;
+            for (_i1115 = 0; _i1115 < _size1111; ++_i1115)
             {
-              xfer += iprot->readString(this->success[_i1095]);
+              xfer += iprot->readString(this->success[_i1115]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17379,10 +17775,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1096;
-      for (_iter1096 = this->success.begin(); _iter1096 != this->success.end(); ++_iter1096)
+      std::vector<std::string> ::const_iterator _iter1116;
+      for (_iter1116 = this->success.begin(); _iter1116 != this->success.end(); ++_iter1116)
       {
-        xfer += oprot->writeString((*_iter1096));
+        xfer += oprot->writeString((*_iter1116));
       }
       xfer += oprot->writeListEnd();
     }
@@ -17427,14 +17823,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1097;
-            ::apache::thrift::protocol::TType _etype1100;
-            xfer += iprot->readListBegin(_etype1100, _size1097);
-            (*(this->success)).resize(_size1097);
-            uint32_t _i1101;
-            for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
+            uint32_t _size1117;
+            ::apache::thrift::protocol::TType _etype1120;
+            xfer += iprot->readListBegin(_etype1120, _size1117);
+            (*(this->success)).resize(_size1117);
+            uint32_t _i1121;
+            for (_i1121 = 0; _i1121 < _size1117; ++_i1121)
             {
-              xfer += iprot->readString((*(this->success))[_i1101]);
+              xfer += iprot->readString((*(this->success))[_i1121]);
             }
             xfer += iprot->readListEnd();
           }
@@ -17572,17 +17968,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size1102;
-            ::apache::thrift::protocol::TType _ktype1103;
-            ::apache::thrift::protocol::TType _vtype1104;
-            xfer += iprot->readMapBegin(_ktype1103, _vtype1104, _size1102);
-            uint32_t _i1106;
-            for (_i1106 = 0; _i1106 < _size1102; ++_i1106)
+            uint32_t _size1122;
+            ::apache::thrift::protocol::TType _ktype1123;
+            ::apache::thrift::protocol::TType _vtype1124;
+            xfer += iprot->readMapBegin(_ktype1123, _vtype1124, _size1122);
+            uint32_t _i1126;
+            for (_i1126 = 0; _i1126 < _size1122; ++_i1126)
             {
-              std::string _key1107;
-              xfer += iprot->readString(_key1107);
-              std::string& _val1108 = this->success[_key1107];
-              xfer += iprot->readString(_val1108);
+              std::string _key1127;
+              xfer += iprot->readString(_key1127);
+              std::string& _val1128 = this->success[_key1127];
+              xfer += iprot->readString(_val1128);
             }
             xfer += iprot->readMapEnd();
           }
@@ -17621,11 +18017,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, std::string> ::const_iterator _iter1109;
-      for (_iter1109 = this->success.begin(); _iter1109 != this->success.end(); ++_iter1109)
+      std::map<std::string, std::string> ::const_iterator _iter1129;
+      for (_iter1129 = this->success.begin(); _iter1129 != this->success.end(); ++_iter1129)
       {
-        xfer += oprot->writeString(_iter1109->first);
-        xfer += oprot->writeString(_iter1109->second);
+        xfer += oprot->writeString(_iter1129->first);
+        xfer += oprot->writeString(_iter1129->second);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -17670,17 +18066,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             (*(this->success)).clear();
-            uint32_t _size1110;
-            ::apache::thrift::protocol::TType _ktype1111;
-            ::apache::thrift::protocol::TType _vtype1112;
-            xfer += iprot->readMapBegin(_ktype1111, _vtype1112, _size1110);
-            uint32_t _i1114;
-            for (_i1114 = 0; _i1114 < _size1110; ++_i1114)
+            uint32_t _size1130;
+            ::apache::thrift::protocol::TType _ktype1131;
+            ::apache::thrift::protocol::TType _vtype1132;
+            xfer += iprot->readMapBegin(_ktype1131, _vtype1132, _size1130);
+            uint32_t _i1134;
+            for (_i1134 = 0; _i1134 < _size1130; ++_i1134)
             {
-              std::string _key1115;
-              xfer += iprot->readString(_key1115);
-              std::string& _val1116 = (*(this->success))[_key1115];
-              xfer += iprot->readString(_val1116);
+              std::string _key1135;
+              xfer += iprot->readString(_key1135);
+              std::string& _val1136 = (*(this->success))[_key1135];
+              xfer += iprot->readString(_val1136);
             }
             xfer += iprot->readMapEnd();
           }
@@ -17755,17 +18151,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->part_vals.clear();
-            uint32_t _size1117;
-            ::apache::thrift::protocol::TType _ktype1118;
-            ::apache::thrift::protocol::TType _vtype1119;
-            xfer += iprot->readMapBegin(_ktype1118, _vtype1119, _size1117);
-            uint32_t _i1121;
-            for (_i1121 = 0; _i1121 < _size1117; ++_i1121)
+            uint32_t _size1137;
+            ::apache::thrift::protocol::TType _ktype1138;
+            ::apache::thrift::protocol::TType _vtype1139;
+            xfer += iprot->readMapBegin(_ktype1138, _vtype1139, _size1137);
+            uint32_t _i1141;
+            for (_i1141 = 0; _i1141 < _size1137; ++_i1141)
             {
-              std::string _key1122;
-              xfer += iprot->readString(_key1122);
-              std::string& _val1123 = this->part_vals[_key1122];
-              xfer += iprot->readString(_val1123);
+              std::string _key1142;
+              xfer += iprot->readString(_key1142);
+              std::string& _val1143 = this->part_vals[_key1142];
+              xfer += iprot->readString(_val1143);
             }
             xfer += iprot->readMapEnd();
           }
@@ -17776,9 +18172,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift::
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1124;
-          xfer += iprot->readI32(ecast1124);
-          this->eventType = (PartitionEventType::type)ecast1124;
+          int32_t ecast1144;
+          xfer += iprot->readI32(ecast1144);
+          this->eventType = (PartitionEventType::type)ecast1144;
           this->__isset.eventType = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -17812,11 +18208,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1125;
-    for (_iter1125 = this->part_vals.begin(); _iter1125 != this->part_vals.end(); ++_iter1125)
+    std::map<std::string, std::string> ::const_iterator _iter1145;
+    for (_iter1145 = this->part_vals.begin(); _iter1145 != this->part_vals.end(); ++_iter1145)
     {
-      xfer += oprot->writeString(_iter1125->first);
-      xfer += oprot->writeString(_iter1125->second);
+      xfer += oprot->writeString(_iter1145->first);
+      xfer += oprot->writeString(_iter1145->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -17852,11 +18248,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1126;
-    for (_iter1126 = (*(this->part_vals)).begin(); _iter1126 != (*(this->part_vals)).end(); ++_iter1126)
+    std::map<std::string, std::string> ::const_iterator _iter1146;
+    for (_iter1146 = (*(this->part_vals)).begin(); _iter1146 != (*(this->part_vals)).end(); ++_iter1146)
     {
-      xfer += oprot->writeString(_iter1126->first);
-      xfer += oprot->writeString(_iter1126->second);
+      xfer += oprot->writeString(_iter1146->first);
+      xfer += oprot->writeString(_iter1146->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -18125,17 +18521,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->part_vals.clear();
-            uint32_t _size1127;
-            ::apache::thrift::protocol::TType _ktype1128;
-            ::apache::thrift::protocol::TType _vtype1129;
-            xfer += iprot->readMapBegin(_ktype1128, _vtype1129, _size1127);
-            uint32_t _i1131;
-            for (_i1131 = 0; _i1131 < _size1127; ++_i1131)
+            uint32_t _size1147;
+            ::apache::thrift::protocol::TType _ktype1148;
+            ::apache::thrift::protocol::TType _vtype1149;
+            xfer += iprot->readMapBegin(_ktype1148, _vtype1149, _size1147);
+            uint32_t _i1151;
+            for (_i1151 = 0; _i1151 < _size1147; ++_i1151)
             {
-              std::string _key1132;
-              xfer += iprot->readString(_key1132);
-              std::string& _val1133 = this->part_vals[_key1132];
-              xfer += iprot->readString(_val1133);
+              std::string _key1152;
+              xfer += iprot->readString(_key1152);
+              std::string& _val1153 = this->part_vals[_key1152];
+              xfer += iprot->readString(_val1153);
             }
             xfer += iprot->readMapEnd();
           }
@@ -18146,9 +18542,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri
         break;
       case 4:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1134;
-          xfer += iprot->readI32(ecast1134);
-          this->eventType = (PartitionEventType::type)ecast1134;
+          int32_t ecast1154;
+          xfer += iprot->readI32(ecast1154);
+          this->eventType = (PartitionEventType::type)ecast1154;
           this->__isset.eventType = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -18182,11 +18578,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1135;
-    for (_iter1135 = this->part_vals.begin(); _iter1135 != this->part_vals.end(); ++_iter1135)
+    std::map<std::string, std::string> ::const_iterator _iter1155;
+    for (_iter1155 = this->part_vals.begin(); _iter1155 != this->part_vals.end(); ++_iter1155)
     {
-      xfer += oprot->writeString(_iter1135->first);
-      xfer += oprot->writeString(_iter1135->second);
+      xfer += oprot->writeString(_iter1155->first);
+      xfer += oprot->writeString(_iter1155->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -18222,11 +18618,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1136;
-    for (_iter1136 = (*(this->part_vals)).begin(); _iter1136 != (*(this->part_vals)).end(); ++_iter1136)
+    std::map<std::string, std::string> ::const_iterator _iter1156;
+    for (_iter1156 = (*(this->part_vals)).begin(); _iter1156 != (*(this->part_vals)).end(); ++_iter1156)
     {
-      xfer += oprot->writeString(_iter1136->first);
-      xfer += oprot->writeString(_iter1136->second);
+      xfer += oprot->writeString(_iter1156->first);
+      xfer += oprot->writeString(_iter1156->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -19662,14 +20058,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1137;
-            ::apache::thrift::protocol::TType _etype1140;
-            xfer += iprot->readListBegin(_etype1140, _size1137);
-            this->success.resize(_size1137);
-            uint32_t _i1141;
-            for (_i1141 = 0; _i1141 < _size1137; ++_i1141)
+            uint32_t _size1157;
+            ::apache::thrift::protocol::TType _etype1160;
+            xfer += iprot->readListBegin(_etype1160, _size1157);
+            this->success.resize(_size1157);
+            uint32_t _i1161;
+            for (_i1161 = 0; _i1161 < _size1157; ++_i1161)
             {
-              xfer += this->success[_i1141].read(iprot);
+              xfer += this->success[_i1161].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -19716,10 +20112,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Index> ::const_iterator _iter1142;
-      for (_iter1142 = this->success.begin(); _iter1142 != this->success.end(); ++_iter1142)
+      std::vector<Index> ::const_iterator _iter1162;
+      for (_iter1162 = this->success.begin(); _iter1162 != this->success.end(); ++_iter1162)
       {
-        xfer += (*_iter1142).write(oprot);
+        xfer += (*_iter1162).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -19768,14 +20164,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1143;
-            ::apache::thrift::protocol::TType _etype1146;
-            xfer += iprot->readListBegin(_etype1146, _size1143);
-            (*(this->success)).resize(_size1143);
-            uint32_t _i1147;
-            for (_i1147 = 0; _i1147 < _size1143; ++_i1147)
+            uint32_t _size1163;
+            ::apache::thrift::protocol::TType _etype1166;
+            xfer += iprot->readListBegin(_etype1166, _size1163);
+            (*(this->success)).resize(_size1163);
+            uint32_t _i1167;
+            for (_i1167 = 0; _i1167 < _size1163; ++_i1167)
             {
-              xfer += (*(this->success))[_i1147].read(iprot);
+              xfer += (*(this->success))[_i1167].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -19953,14 +20349,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1148;
-            ::apache::thrift::protocol::TType _etype1151;
-            xfer += iprot->readListBegin(_etype1151, _size1148);
-            this->success.resize(_size1148);
-            uint32_t _i1152;
-            for (_i1152 = 0; _i1152 < _size1148; ++_i1152)
+            uint32_t _size1168;
+            ::apache::thrift::protocol::TType _etype1171;
+            xfer += iprot->readListBegin(_etype1171, _size1168);
+            this->success.resize(_size1168);
+            uint32_t _i1172;
+            for (_i1172 = 0; _i1172 < _size1168; ++_i1172)
             {
-              xfer += iprot->readString(this->success[_i1152]);
+              xfer += iprot->readString(this->success[_i1172]);
             }
             xfer += iprot->readListEnd();
           }
@@ -19999,10 +20395,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1153;
-      for (_iter1153 = this->success.begin(); _iter1153 != this->success.end(); ++_iter1153)
+      std::vector<std::string> ::const_iterator _iter1173;
+      for (_iter1173 = this->success.begin(); _iter1173 != this->success.end(); ++_iter1173)
       {
-        xfer += oprot->writeString((*_iter1153));
+        xfer += oprot->writeString((*_iter1173));
       }
       xfer += oprot->writeListEnd();
     }
@@ -20047,14 +20443,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1154;
-            ::apache::thrift::protocol::TType _etyp

<TRUNCATED>
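
The hunks above are purely mechanical: adding a method such as exchange_partitions to the service shifts the Thrift compiler's temporary-variable counter, so every generated container loop is renumbered (_size987 becomes _size1007, _iter997 becomes _iter1017, and so on) while the logic is untouched. For readers unfamiliar with the generated shape, here is a minimal standalone sketch of the list-read pattern those hunks repeat; MockProtocol and readPartVals are hypothetical stand-ins for the real ::apache::thrift::protocol API, kept only close enough to show the begin/resize/loop/end structure.

    // A sketch, not the real Thrift runtime: MockProtocol mimics only the
    // calls the generated code makes (readListBegin/readString/readListEnd).
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct MockProtocol {
      std::vector<std::string> payload;  // pretend wire data
      std::size_t pos = 0;

      // Reports the element type and list size, then returns "bytes read"
      // (always 0 here); the generated code accumulates that into xfer.
      uint32_t readListBegin(int& etype, uint32_t& size) {
        etype = 11;  // T_STRING in the real protocol headers
        size = static_cast<uint32_t>(payload.size());
        return 0;
      }
      uint32_t readString(std::string& out) { out = payload[pos++]; return 0; }
      uint32_t readListEnd() { return 0; }
    };

    // Mirrors the generated loop: clear, readListBegin, resize, indexed
    // element reads, readListEnd -- exactly the shape being renumbered above.
    uint32_t readPartVals(MockProtocol* iprot, std::vector<std::string>& part_vals) {
      uint32_t xfer = 0;
      part_vals.clear();
      uint32_t size;
      int etype;
      xfer += iprot->readListBegin(etype, size);
      part_vals.resize(size);
      for (uint32_t i = 0; i < size; ++i) {
        xfer += iprot->readString(part_vals[i]);
      }
      xfer += iprot->readListEnd();
      return xfer;
    }

    int main() {
      MockProtocol proto;
      proto.payload = {"year=2015", "month=11"};
      std::vector<std::string> part_vals;
      readPartVals(&proto, part_vals);
      for (const std::string& v : part_vals) std::cout << v << "\n";
      return 0;
    }

The map and enum hunks later in the diff follow the same template (readMapBegin plus paired key/value reads, and readI32 followed by a cast to PartitionEventType), which is why the only changes in them are the numeric suffixes.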

[39/55] [abbrv] hive git commit: Revert inadvertent addition of HiveConf.java.orig file

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37f05f41/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
deleted file mode 100644
index b214344..0000000
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
+++ /dev/null
@@ -1,3372 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.conf;
-
-import com.google.common.base.Joiner;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
-import org.apache.hadoop.hive.conf.Validator.PatternSet;
-import org.apache.hadoop.hive.conf.Validator.RangeValidator;
-import org.apache.hadoop.hive.conf.Validator.RatioValidator;
-import org.apache.hadoop.hive.conf.Validator.StringSet;
-import org.apache.hadoop.hive.conf.Validator.TimeValidator;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell;
-import org.apache.hive.common.HiveCompat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.login.LoginException;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Hive Configuration.
- */
-public class HiveConf extends Configuration {
-  protected String hiveJar;
-  protected Properties origProp;
-  protected String auxJars;
-  private static final Logger l4j = LoggerFactory.getLogger(HiveConf.class);
-  private static boolean loadMetastoreConfig = false;
-  private static boolean loadHiveServer2Config = false;
-  private static URL hiveDefaultURL = null;
-  private static URL hiveSiteURL = null;
-  private static URL hivemetastoreSiteUrl = null;
-  private static URL hiveServer2SiteUrl = null;
-
-  private static byte[] confVarByteArray = null;
-
-
-  private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
-  private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
-  private final List<String> restrictList = new ArrayList<String>();
-  private final Set<String> hiddenSet = new HashSet<String>();
-
-  private Pattern modWhiteListPattern = null;
-  private volatile boolean isSparkConfigUpdated = false;
-  private static final int LOG_PREFIX_LENGTH = 64;
-
-  public boolean getSparkConfigUpdated() {
-    return isSparkConfigUpdated;
-  }
-
-  public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
-    this.isSparkConfigUpdated = isSparkConfigUpdated;
-  }
-
-  static {
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    if (classLoader == null) {
-      classLoader = HiveConf.class.getClassLoader();
-    }
-
-    hiveDefaultURL = classLoader.getResource("hive-default.xml");
-
-    // Look for hive-site.xml on the CLASSPATH and log its location if found.
-    hiveSiteURL = classLoader.getResource("hive-site.xml");
-    hivemetastoreSiteUrl = classLoader.getResource("hivemetastore-site.xml");
-    hiveServer2SiteUrl = classLoader.getResource("hiveserver2-site.xml");
-
-    for (ConfVars confVar : ConfVars.values()) {
-      vars.put(confVar.varname, confVar);
-    }
-  }
-
-  /**
-   * Metastore-related options that the db is initialized against. When a conf
-   * var in this list is changed, the metastore instance for the CLI will
-   * be recreated so that the change will take effect.
-   */
-  public static final HiveConf.ConfVars[] metaVars = {
-      HiveConf.ConfVars.METASTOREWAREHOUSE,
-      HiveConf.ConfVars.METASTOREURIS,
-      HiveConf.ConfVars.METASTORE_SERVER_PORT,
-      HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
-      HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
-      HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
-      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
-      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
-      HiveConf.ConfVars.METASTOREPWD,
-      HiveConf.ConfVars.METASTORECONNECTURLHOOK,
-      HiveConf.ConfVars.METASTORECONNECTURLKEY,
-      HiveConf.ConfVars.METASTORESERVERMINTHREADS,
-      HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
-      HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
-      HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
-      HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
-      HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
-      HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
-      HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
-      HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
-      HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
-      HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
-      HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
-      HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
-      HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
-      HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
-      HiveConf.ConfVars.METASTORE_AUTO_CREATE_SCHEMA,
-      HiveConf.ConfVars.METASTORE_AUTO_START_MECHANISM_MODE,
-      HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
-      HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
-      HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
-      HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
-      HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
-      HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
-      HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
-      HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
-      HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
-      HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
-      HiveConf.ConfVars.METASTORE_FILTER_HOOK,
-      HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
-      HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
-      HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
-      HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX,
-      HiveConf.ConfVars.METASTORE_INIT_HOOKS,
-      HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
-      HiveConf.ConfVars.HMSHANDLERATTEMPTS,
-      HiveConf.ConfVars.HMSHANDLERINTERVAL,
-      HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
-      HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
-      HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
-      HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
-      HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
-      HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
-      HiveConf.ConfVars.HIVE_TXN_MANAGER,
-      HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
-      HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
-      HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
-      HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
-      HiveConf.ConfVars.METASTORE_FASTPATH,
-      HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_TIME_TO_LIVE,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_WRITER_WAIT,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_READER_WAIT,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_FULL,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_CLEAN_UNTIL,
-      HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL
-      };
-
-  /**
-   * User configurable Metastore vars
-   */
-  public static final HiveConf.ConfVars[] metaConfVars = {
-      HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
-      HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
-      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT
-  };
-
-  static {
-    for (ConfVars confVar : metaConfVars) {
-      metaConfs.put(confVar.varname, confVar);
-    }
-  }
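-
-  // For illustration: the static block above makes metaConfs the lookup table
-  // for user-settable metastore parameters. A minimal sketch of a membership
-  // check against it (illustrative only, not part of the surrounding code):
-  //
-  //   HiveConf.ConfVars var = metaConfs.get("hive.metastore.try.direct.sql");
-  //   boolean userSettable = (var != null);  // true: listed in metaConfVars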
-
-  /**
-   * dbVars are the parameters that can be set per database. If these
-   * parameters are set as a database property, when switching to that
-   * database, the HiveConf variable will be changed. The change of these
-   * parameters will effectively change the DFS and MapReduce clusters
-   * for different databases.
-   */
-  public static final HiveConf.ConfVars[] dbVars = {
-    HiveConf.ConfVars.HADOOPBIN,
-    HiveConf.ConfVars.METASTOREWAREHOUSE,
-    HiveConf.ConfVars.SCRATCHDIR
-  };
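-
-  // A minimal sketch (illustrative only; the real switch-database code path is
-  // elsewhere) of how the dbVars above could be overlaid from a database's
-  // properties; Database.getParameters() is assumed to be the Thrift-generated
-  // accessor, and the overlay loop itself is hypothetical:
-  //
-  //   for (HiveConf.ConfVars v : HiveConf.dbVars) {
-  //     String dbValue = db.getParameters().get(v.varname);
-  //     if (dbValue != null) {
-  //       conf.setVar(v, dbValue);  // e.g. repoint SCRATCHDIR for this database
-  //     }
-  //   }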
-
-  /**
-   * ConfVars.
-   *
-   * These are the default configuration properties for Hive. Each HiveConf
-   * object is initialized as follows:
-   *
-   * 1) Hadoop configuration properties are applied.
-   * 2) ConfVar properties with non-null values are overlaid.
-   * 3) hive-site.xml properties are overlaid.
-   *
-   * WARNING: think twice before adding any Hadoop configuration properties
-   * with non-null values to this list as they will override any values defined
-   * in the underlying Hadoop configuration.
-   */
-  public static enum ConfVars {
-    // QL execution stuff
-    SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
-    PLAN("hive.exec.plan", "", ""),
-    PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
-        "Query plan format serialization between client and task nodes. \n" +
-        "Two supported values are : kryo and javaXML. Kryo is default."),
-    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
-        "Directory name that will be created inside table locations in order to support HDFS encryption. " +
-        "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
-        "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
-    SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
-        "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
-        "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
-        "with ${hive.scratch.dir.permission}."),
-    LOCALSCRATCHDIR("hive.exec.local.scratchdir",
-        "${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
-        "Local scratch space for Hive jobs"),
-    DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
-        "${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
-        "Temporary local directory for added resources in the remote file system."),
-    SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
-        "The permission for the user specific scratch directories that get created."),
-    SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
-    SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
-        "Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" +
-        "separate JVM (true recommended) or not. \n" +
-        "Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
-    SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
-        "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
-        "This prevents runaway scripts from filling logs partitions to capacity"),
-    ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
-        "When enabled, this option allows a user script to exit successfully without consuming \n" +
-        "all the data from the standard input."),
-    STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
-        "Streaming jobs that log to standard error with this prefix can log counter or status information."),
-    STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
-        "Enable consumption of status and counter messages for streaming jobs."),
-    COMPRESSRESULT("hive.exec.compress.output", false,
-        "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" +
-        "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
-    COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
-        "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
-        "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
-    COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
-    COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
-    BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
-        "Size per reducer. The default is 256Mb, i.e. if the input size is 1G, it will use 4 reducers."),
-    MAXREDUCERS("hive.exec.reducers.max", 1009,
-        "Maximum number of reducers that will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
-        "negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers."),
-    PREEXECHOOKS("hive.exec.pre.hooks", "",
-        "Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
-        "A pre-execution hook is specified as the name of a Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
-    POSTEXECHOOKS("hive.exec.post.hooks", "",
-        "Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
-        "A post-execution hook is specified as the name of a Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
-    ONFAILUREHOOKS("hive.exec.failure.hooks", "",
-        "Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
-        "An on-failure hook is specified as the name of Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
-    QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
-        "Comma-separated list of hooks to be invoked for each query which can \n" +
-        "tranform the query before it's placed in the job.xml file. Must be a Java class which \n" +
-        "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
-    CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
-        "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
-        "A client stats publisher is specified as the name of a Java class which implements the \n" +
-        "org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
-    EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
-    EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
-        "How many jobs at most can be executed in parallel"),
-    HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
-        "Whether speculative execution for reducers should be turned on. "),
-    HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
-        "The interval with which to poll the JobTracker for the counters the running job. \n" +
-        "The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."),
-    DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
-        "Whether or not to allow dynamic partitions in DML/DDL."),
-    DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
-        "In strict mode, the user must specify at least one static partition\n" +
-        "in case the user accidentally overwrites all partitions.\n" +
-        "In nonstrict mode all partitions are allowed to be dynamic."),
-    DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
-        "Maximum number of dynamic partitions allowed to be created in total."),
-    DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
-        "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
-    MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
-        "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
-    DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
-        "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
-        "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
-        "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
-    DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
-
-    // Whether to show a link to the most failed task + debugging tips
-    SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
-        "If a job fails, whether to provide a link in the CLI to the task with the\n" +
-        "most failures, along with debugging hints if applicable."),
-    JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
-        "Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
-        "for each failed job should be stored in the SessionState"),
-    JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
-    TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
-    OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
-        "String used as a file extension for output files. \n" +
-        "If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
-
-    HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
-
-    HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
-        true),
-
-    LOCALMODEAUTO("hive.exec.mode.local.auto", false,
-        "Let Hive determine whether to run in local mode automatically"),
-    LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
-        "When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode."),
-    LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
-        "When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode."),
-
-    DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
-        "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function"),
-
-    HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
-
-    HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
-        "maximum number of lines for footer user can define for a table file"),
-
-    HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
-        "Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
-        "Table alias will be added to column names for queries of type \"select *\" or \n" +
-        "if query explicitly uses table alias \"select r1.x..\"."),
-
-    // Hadoop Configuration Properties
-    // Properties with null values are ignored and exist only for the purpose of giving us
-    // a symbolic name to reference in the Hive source code. Properties with non-null
-    // values will override any values set in the underlying Hadoop configuration.
-    HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
-    HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
-        "The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
-    HADOOPFS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS"), null, "", true),
-    HADOOPMAPFILENAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPFILENAME"), null, "", true),
-    HADOOPMAPREDINPUTDIR(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIR"), null, "", true),
-    HADOOPMAPREDINPUTDIRRECURSIVE(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIRRECURSIVE"), false, "", true),
-    MAPREDMAXSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 256000000L, "", true),
-    MAPREDMINSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 1L, "", true),
-    MAPREDMINSPLITSIZEPERNODE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 1L, "", true),
-    MAPREDMINSPLITSIZEPERRACK(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 1L, "", true),
-    // The number of reduce tasks per job. Hadoop sets this value to 1 by default
-    // By setting this property to -1, Hive will automatically determine the correct
-    // number of reducers.
-    HADOOPNUMREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPNUMREDUCERS"), -1, "", true),
-    HADOOPJOBNAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPJOBNAME"), null, "", true),
-    HADOOPSPECULATIVEEXECREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSPECULATIVEEXECREDUCERS"), true, "", true),
-    MAPREDSETUPCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false, "", true),
-    MAPREDTASKCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false, "", true),
-
-    // Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
-    METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
-        "location of default database for the warehouse"),
-    METASTOREURIS("hive.metastore.uris", "",
-        "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
-
-    METASTORE_FASTPATH("hive.metastore.fastpath", false,
-        "Used to avoid all of the proxies and object copies in the metastore.  Note, if this is " +
-            "set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
-            "undefined and most likely undesired behavior will result"),
-    METASTORE_HBASE_CATALOG_CACHE_SIZE("hive.metastore.hbase.catalog.cache.size", 50000, "Maximum number of " +
-        "objects we will place in the hbase metastore catalog cache.  The objects will be divided up by " +
-        "types that we need to cache."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.hbase.aggregate.stats.cache.size", 10000,
-        "Maximum number of aggregate stats nodes that we will place in the hbase metastore aggregate stats cache."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.hbase.aggregate.stats.max.partitions", 10000,
-        "Maximum number of partitions that are aggregated per cache node."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY("hive.metastore.hbase.aggregate.stats.false.positive.probability",
-        (float) 0.01, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.hbase.aggregate.stats.max.variance", (float) 0.1,
-        "Maximum tolerable variance in number of partitions between a cached node and our request (default 10%)."),
-    METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for a cached node to be active in the cache before they become stale."),
-    METASTORE_HBASE_CACHE_MAX_WRITER_WAIT("hive.metastore.hbase.cache.max.writer.wait", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
-    METASTORE_HBASE_CACHE_MAX_READER_WAIT("hive.metastore.hbase.cache.max.reader.wait", "1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
-         "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
-    METASTORE_HBASE_CACHE_MAX_FULL("hive.metastore.hbase.cache.max.full", (float) 0.9,
-         "Maximum cache full % after which the cache cleaner thread kicks in."),
-    METASTORE_HBASE_CACHE_CLEAN_UNTIL("hive.metastore.hbase.cache.clean.until", (float) 0.8,
-          "The cleaner thread cleans until cache reaches this % full size."),
-    METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class",
-        "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection",
-        "Class used to connection to HBase"),
-    METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES("hive.metastore.hbase.aggr.stats.cache.entries",
-        10000, "How many in stats objects to cache in memory"),
-    METASTORE_HBASE_AGGR_STATS_MEMORY_TTL("hive.metastore.hbase.aggr.stats.memory.ttl", "60s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds stats objects live in memory after they are read from HBase."),
-    METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY(
-        "hive.metastore.hbase.aggr.stats.invalidator.frequency", "5s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "How often the stats cache scans its HBase entries and looks for expired entries"),
-    METASTORE_HBASE_AGGR_STATS_HBASE_TTL("hive.metastore.hbase.aggr.stats.hbase.ttl", "604800s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds stats entries live in HBase cache after they are created.  They may be" +
-            " invalided by updates or partition drops before this.  Default is one week."),
-
-    METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
-        "Number of retries while opening a connection to metastore"),
-    METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
-        "Number of retries upon failure of Thrift metastore calls"),
-    METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
-    METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for the client to wait between consecutive connection attempts"),
-    METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MetaStore Client socket timeout in seconds"),
-    METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
-        "reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
-        "has an infinite lifetime."),
-    METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
-        "password to use against metastore database"),
-    METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
-        "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
-    METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
-        "Set this to true if multiple threads access metastore through JDO concurrently."),
-    METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
-        "jdbc:derby:;databaseName=metastore_db;create=true",
-        "JDBC connect string for a JDBC metastore"),
-    HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
-        "The number of times to retry a HMSHandler call if there were a connection error."),
-    HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
-    HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
-        "Whether to force reloading of the HMSHandler configuration (including\n" +
-        "the connection URL, before the next metastore query that accesses the\n" +
-        "datastore. Once reloaded, this value is reset to false. Used for\n" +
-        "testing only."),
-    METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024,
-        "Maximum message size in bytes a HMS will accept."),
-    METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
-        "Minimum number of worker threads in the Thrift server's pool."),
-    METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
-        "Maximum number of worker threads in the Thrift server's pool."),
-    METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
-        "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
-
-    METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
-        "_INTERMEDIATE_ORIGINAL",
-        "Intermediate dir suffixes used for archiving. Not important what they\n" +
-        "are, as long as collisions are avoided"),
-    METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
-        "_INTERMEDIATE_ARCHIVED", ""),
-    METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
-        "_INTERMEDIATE_EXTRACTED", ""),
-    METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
-        "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
-    METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
-        "hive-metastore/_HOST@EXAMPLE.COM",
-        "The service principal for the metastore Thrift server. \n" +
-        "The special string _HOST will be replaced automatically with the correct host name."),
-    METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
-        "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
-    METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
-        "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
-    METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
-        "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
-        "Setting it to true will break compatibility with older clients running TBinaryProtocol."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
-        "org.apache.hadoop.hive.thrift.MemoryTokenStore",
-        "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
-        "hive.cluster.delegation.token.store.zookeeper.connectString", "",
-        "The ZooKeeper token store connect string. You can re-use the configuration value\n" +
-        "set in hive.zookeeper.quorum, by leaving this parameter unset."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
-        "hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
-        "The root path for token store data. Note that this is used by both HiveServer2 and\n" +
-        "MetaStore to store delegation Token. One directory gets created for each of them.\n" +
-        "The final directory names would have the servername appended to it (HIVESERVER2,\n" +
-        "METASTORE)."),
-    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
-        "hive.cluster.delegation.token.store.zookeeper.acl", "",
-        "ACL for token store entries. Comma separated list of ACL entries. For example:\n" +
-        "sasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa\n" +
-        "Defaults to all permissions for the hiveserver2/metastore process user."),
-    METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
-        "List of comma separated metastore object types that should be pinned in the cache"),
-    METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP",
-        "Specify connection pool library for datanucleus"),
-    METASTORE_VALIDATE_TABLES("datanucleus.validateTables", false,
-        "Validates existing schema against code. Turn this on if you want to verify the existing schema."),
-    METASTORE_VALIDATE_COLUMNS("datanucleus.validateColumns", false,
-        "Validates existing schema against code. Turn this on if you want to verify the existing schema."),
-    METASTORE_VALIDATE_CONSTRAINTS("datanucleus.validateConstraints", false,
-        "Validates existing schema against code. Turn this on if you want to verify the existing schema."),
-    METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
-    METASTORE_AUTO_CREATE_SCHEMA("datanucleus.autoCreateSchema", true,
-        "creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once"),
-    METASTORE_FIXED_DATASTORE("datanucleus.fixedDatastore", false, ""),
-    METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", false,
-        "Enforce metastore schema version consistency.\n" +
-        "True: Verify that version information stored in metastore matches with one from Hive jars.  Also disable automatic\n" +
-        "      schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
-        "      proper metastore schema migration. (Default)\n" +
-        "False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
-    METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", true,
-      "When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
-      " enabled the MS will be unusable."),
-    METASTORE_AUTO_START_MECHANISM_MODE("datanucleus.autoStartMechanismMode", "checked",
-        "throw exception if metadata tables are incorrect"),
-    METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
-        "Default transaction isolation level for identity generation."),
-    METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
-        "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
-    METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
-    METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
-        "Name of the identifier factory to use when generating table/column names etc. \n" +
-        "'datanucleus1' is used for backward compatibility with DataNucleus v1"),
-    METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
-    METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
-        "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
-    METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
-        "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
-        "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
-        "but it may also cause higher memory requirement at the client side."),
-    METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
-        "hive.metastore.batch.retrieve.table.partition.max", 1000,
-        "Maximum number of objects that metastore internally retrieves in one batch."),
-
-    METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
-        "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
-        "An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
-    METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
-        "List of comma separated listeners for metastore events."),
-    METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", ""),
-    METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "time after which events will be removed from the database listener queue"),
-    METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
-        "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
-        "for operations like drop-partition (disallow the drop-partition if the user in\n" +
-        "question doesn't have permissions to delete the corresponding directory\n" +
-        "on the storage)."),
-    METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Frequency at which timer task runs to purge expired events in metastore."),
-    METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Duration after which events expire from events table"),
-    METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
-        "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
-        "the client's reported user and group permissions. Note that this property must be set on \n" +
-        "both the client and server sides. Further note that its best effort. \n" +
-        "If client sets its to true and server sets it to false, client setting will be ignored."),
-    METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
-        "Partition names will be checked against this regex pattern and rejected if not matched."),
-
-    METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
-        "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
-        "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
-        "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
-        "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
-        "is also irrelevant."),
-    METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
-        "Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
-        "DataNucleus for certain read paths. This can improve metastore performance when\n" +
-        "fetching many partitions or column statistics by orders of magnitude; however, it\n" +
-        "is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
-        "the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
-        "work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
-        "metastore is backed by MongoDB), you might want to disable this to save the\n" +
-        "try-and-fall-back cost."),
-    METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
-        "Batch size for partition and other object retrieval from the underlying DB in direct\n" +
-        "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
-        "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
-        "may impede performance. -1 means no batching, 0 means automatic batching."),
-    METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
-        "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
-        "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
-        "select query has incorrect syntax or something similar inside a transaction, the\n" +
-        "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
-        "should disable the usage of direct SQL inside transactions if that happens in your case."),
-    METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings", false,
-        "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
-        "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
-        "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
-        "pruning is the correct behaviour"),
-    METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
-        "hive.metastore.disallow.incompatible.col.type.changes", false,
-        "If true (default is false), ALTER TABLE operations which change the type of a\n" +
-        "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
-        "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
-        "datatypes can be converted from string to any type. The map is also serialized as\n" +
-        "a string, which can be read as a string as well. However, with any binary\n" +
-        "serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
-        "when subsequently trying to access old partitions.\n" +
-        "\n" +
-        "Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
-        "not blocked.\n" +
-        "\n" +
-        "See HIVE-4409 for more details."),
-
-    NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
-        "Default property values for newly created tables"),
-    DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
-        "Table Properties to copy over when executing a Create Table Like."),
-    METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
-        "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
-        "This class is used to store and retrieval of raw metadata objects such as table, database"),
-    METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
-        "Driver class name for a JDBC metastore"),
-    METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
-        "org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
-        "class implementing the jdo persistence"),
-    METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
-        "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
-    METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
-        "Detaches all objects from session so that they can be used after transaction is committed"),
-    METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
-        "Reads outside of transactions"),
-    METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
-        "Username to use against metastore database"),
-    METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
-        "List of comma separated listeners for the end of metastore functions."),
-    METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
-        "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
-        "* implies all the keys will get inherited."),
-    METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
-        "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
-        + "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
-    FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
-        " to fire events for DML operations"),
-    METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
-        "Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
-            "or drops partitions iteratively"),
-
-    METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", true,
-        "Whether aggregate stats caching is enabled or not."),
-    METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
-        "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
-        "Maximum number of partitions that are aggregated per cache node."),
-    METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
-        "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
-        "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
-    METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for a cached node to be active in the cache before they become stale."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
-    METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
-        "Maximum cache full % after which the cache cleaner thread kicks in."),
-    METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
-        "The cleaner thread cleans until cache reaches this % full size."),
-    METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
-
-    // Parameters for exporting metadata on table drop (requires the use of the)
-    // org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
-    METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
-        "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
-        "it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
-        "metadata being exported to the current user's home directory on HDFS."),
-    MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
-        "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
-        "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
-        "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
-
-    // CLI
-    CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
-    CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
-        "Whether to include the current database in the Hive prompt."),
-    CLIPROMPT("hive.cli.prompt", "hive",
-        "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
-        "Variable substitution will only be invoked at the Hive CLI startup."),
-    CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
-        "The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
-        "If the value of this property is -1, then Hive will use the auto-detected terminal width."),
-
-    HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
-
-    // Things we log in the jobconf
-
-    // session identifier
-    HIVESESSIONID("hive.session.id", "", ""),
-    // whether session is running in silent mode or not
-    HIVESESSIONSILENT("hive.session.silent", false, ""),
-
-    HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
-        "Whether to log Hive query, query plan, runtime statistics etc."),
-
-    HIVEQUERYSTRING("hive.query.string", "",
-        "Query being executed (there might be multiple per session)"),
-
-    HIVEQUERYID("hive.query.id", "",
-        "ID for the query being executed (there might be multiple per session)"),
-
-    HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
-
-    // hive jar
-    HIVEJAR("hive.jar.path", "",
-        "The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
-    HIVEAUXJARS("hive.aux.jars.path", "",
-        "The location of the plugin jars that contain implementations of user defined functions and serdes."),
-
-    // reloadable jars
-    HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
-        "Jars can be renewed by executing reload command. And these jars can be "
-            + "used as the auxiliary classes like creating a UDF or SerDe."),
-
-    // hive added files and jars
-    HIVEADDEDFILES("hive.added.files.path", "", "This is an internal parameter."),
-    HIVEADDEDJARS("hive.added.jars.path", "", "This is an internal parameter."),
-    HIVEADDEDARCHIVES("hive.added.archives.path", "", "This is an internal parameter."),
-
-    HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
-
-    // for hive script operator
-    HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "How long to run autoprogressor for the script/UDTF operators.\n" +
-        "Set to 0 for forever."),
-    HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
-        "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
-        "to avoid the task getting killed because of inactivity.  Hive sends progress information when the script is \n" +
-        "outputting to stderr.  This option removes the need of periodically producing stderr messages, \n" +
-        "but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
-    HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
-        "Name of the environment variable that holds the unique script operator ID in the user's \n" +
-        "transform function (the custom mapper/reducer that the user has specified in the query)"),
-    HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
-        "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
-    HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
-        "hive.txn.valid.txns,hive.script.operator.env.blacklist",
-        "Comma separated list of keys from the configuration file not to convert to environment " +
-        "variables when envoking the script operator"),
-    HIVEMAPREDMODE("hive.mapred.mode", "nonstrict",
-        "The mode in which the Hive operations are being performed. \n" +
-        "In strict mode, some risky queries are not allowed to run. They include:\n" +
-        "  Cartesian Product.\n" +
-        "  No partition being picked up for a query.\n" +
-        "  Comparing bigints and strings.\n" +
-        "  Comparing bigints and doubles.\n" +
-        "  Orderby without limit."),
-    HIVEALIAS("hive.alias", "", ""),
-    HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
-    HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
-    HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
-        "How many rows in the right-most join operand Hive should buffer before emitting the join result."),
-    HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
-        "How many rows in the joining tables (except the streaming table) should be cached in memory."),
-
-    // CBO related
-    HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
-    HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
-    HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on "
-                                 + "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
-    HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
-    HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over the network;"
-                                                                  + " expressed as multiple of CPU cost"),
-    HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
-                                                                             + " expressed as multiple of NETWORK cost"),
-    HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
-                                                                           + " expressed as multiple of NETWORK cost"),
-    HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
-                                                                 + " expressed as multiple of Local FS write cost"),
-    HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
-                                                                 + " expressed as multiple of Local FS read cost"),
-    AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"),
-
-    // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.rows;
-    // need to remove by Hive 0.13. Also, do not change default (see SMB operator)
-    HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
-
-    HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
-        "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
-        "because memory-optimized hashtable cannot be serialized."),
-    HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
-        (float) 0.5, "Probing space percentage of the optimized hashtable"),
-    HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid " +
-        "grace hash join as the join method for mapjoin. Tez only."),
-    HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
-        "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
-        "This number should be a power of 2."),
-    HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace " +
-        "hash join, the minimum write buffer size used by the optimized hashtable. Default is 512 KB."),
-    HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For " +
-        "hybrid grace hash join, the minimum number of partitions to create."),
-    HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024,
-        "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
-        "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
-        "joins unnecessary memory will be allocated and then trimmed."),
-
-    HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
-        "How many rows with the same key value should be cached in memory per smb joined table."),
-    HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
-        "Number of rows after which size of the grouping keys/aggregation classes is performed"),
-    HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5,
-        "Portion of total memory to be used by map-side group aggregation hash table"),
-    HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
-        "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
-    HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
-        "The max memory to be used by map-side group aggregation hash table.\n" +
-        "If the memory usage is higher than this number, force to flush data"),
-    HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5,
-        "Hash aggregation will be turned off if the ratio between hash  table size and input rows is bigger than this number. \n" +
-        "Set to 1 to make sure hash aggregation is never turned off."),
-    HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
-        "Whether to optimize multi group by query to generate single M/R  job plan. If the multi group by query has \n" +
-        "common group by keys, it will be optimized to generate single M/R job."),
-    HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false,
-        "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
-        "the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
-        "is that it limits the number of mappers to the number of files."),
-    HIVE_MAP_GROUPBY_SORT_TESTMODE("hive.map.groupby.sorted.testmode", false,
-        "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
-        "the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan\n" +
-        "is not converted, but a query property is set to denote the same."),
-    HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
-        "Whether to enable using Column Position Alias in Group By or Order By"),
-    HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
-        "Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
-        "For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
-        "4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
-        "This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
-        "and map-side aggregation does not do a very good job. \n" +
-        "\n" +
-        "This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
-        "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
-        "assumption that the original group by will reduce the data size."),
-
-    // Max filesize used to do a single copy (after that, distcp is used)
-    HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
-        "Maximum file size (in Mb) that Hive uses to do single HDFS copies between directories." +
-        "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
-
-    // for hive udtf operator
-    HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
-        "Whether Hive should automatically send progress information to TaskTracker \n" +
-        "when using UDTF's to prevent the task getting killed because of inactivity.  Users should be cautious \n" +
-        "because this may prevent TaskTracker from killing tasks with infinite loops."),
-
-    HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"),
-        "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
-    HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
-        new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
-        "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
-        "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
-        "for all tables."),
-    HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile"),
-        "Default file format for storing result of the query."),
-    HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
-
-    // default serde for rcfile
-    HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
-        "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
-        "The default SerDe Hive will use for the RCFile format"),
-
-    HIVEDEFAULTSERDE("hive.default.serde",
-        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
-        "The default SerDe Hive will use for storage formats that do not specify a SerDe."),
-
-    SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
-        "org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
-        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
-        "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
-        "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
-        "org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
-        "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
-        "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
-        "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
-        "SerDes retrieving schema from metastore. This is an internal parameter."),
-
-    HIVEHISTORYFILELOC("hive.querylog.location",
-        "${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
-        "Location of Hive run time structured log file"),
-
-    HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
-        "Whether to log the plan's progress every time a job's progress is checked.\n" +
-        "These logs are written to the location specified by hive.querylog.location"),
-
-    HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
-        new TimeValidator(TimeUnit.MILLISECONDS),
-        "The interval to wait between logging the plan's progress.\n" +
-        "If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
-        "the progress is logged regardless of this value.\n" +
-        "The actual interval will be the ceiling of (this value divided by the value of\n" +
-        "hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
-        "I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
-        "logged less frequently than specified.\n" +
-        "This only has an effect if hive.querylog.enable.plan.progress is set to true."),
-
-    HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
-        "The default SerDe for transmitting input data to and reading output data from the user scripts. "),
-    HIVESCRIPTRECORDREADER("hive.script.recordreader",
-        "org.apache.hadoop.hive.ql.exec.TextRecordReader",
-        "The default record reader for reading data from the user scripts. "),
-    HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
-        "org.apache.hadoop.hive.ql.exec.TextRecordWriter",
-        "The default record writer for writing data to the user scripts. "),
-    HIVESCRIPTESCAPE("hive.transform.escape.input", false,
-        "This adds an option to escape special chars (newlines, carriage returns and\n" +
-        "tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
-        "can contain data that contains special characters."),
-    HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
-        "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
-        "The last record before the end of stream can have less than hive.binary.record.max.length bytes"),
-
-    // HWI
-    HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0", "This is the host address the Hive Web Interface will listen on"),
-    HIVEHWILISTENPORT("hive.hwi.listen.port", "9999", "This is the port the Hive Web Interface will listen on"),
-    HIVEHWIWARFILE("hive.hwi.war.file", "${env:HWI_WAR_FILE}",
-        "This sets the path to the HWI war file, relative to ${HIVE_HOME}. "),
-
-    HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
-
-    //small table file size
-    HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
-        "The threshold for the input file size of the small tables; if the file size is smaller \n" +
-        "than this threshold, it will try to convert the common join into map join"),
-
-    HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
-        "A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
-
-    // test mode in hive mode
-    HIVETESTMODE("hive.test.mode", false,
-        "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
-        false),
-    HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
-        "In test mode, specfies prefixes for the output table", false),
-    HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
-        "In test mode, specfies sampling frequency for table, which is not bucketed,\n" +
-        "For example, the following query:\n" +
-        "  INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
-        "would be converted to\n" +
-        "  INSERT OVERWRITE TABLE test_dest\n" +
-        "  SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false),
-    HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
-        "In test mode, specifies comma separated table names which would not apply sampling", false),
-    HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false),
-    HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
-    HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
-
-    HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
-        "Merge small files at the end of a map-only job"),
-    HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
-        "Merge small files at the end of a map-reduce job"),
-    HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
-    HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
-    HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
-        "Size of merged files at the end of the job"),
-    HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
-        "When the average output file size of a job is less than this number, Hive will start an additional \n" +
-        "map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
-        "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
-    HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
-    HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
-        "When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" +
-        "while writing a table with ORC file format, enabling this config will do stripe-level\n" +
-        "fast merge for small ORC files. Note that enabling this config will not honor the\n" +
-        "padding tolerance config (hive.exec.orc.block.padding.tolerance)."),
-
-    HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
-        "If this is set the header for RCFiles will simply be RCF.  If this is not\n" +
-        "set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
-        "by the input and output RCFile formats."),
-    HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
-
-    HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""),
-    HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""),
-    HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
-    HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""),   // 4M
-
-    PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f,
-        "Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
-        "It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
-        "This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
-    HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", true,
-      "Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
-      "on reading parquet files from other tools"),
-    HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false,
-        "Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" +
-        "Set this flag to true to interpret the value as seconds to be consistent with float/double." ),
-    HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f,
-        "Maximum fraction of heap that can be used by ORC file writers"),
-    HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null,
-        "Define the version of the file to write. Possible values are 0.11 and 0.12.\n" +
-        "If this parameter is not defined, ORC will use the run length encoding (RLE)\n" +
-        "introduced in Hive 0.12. Any value other than 0.11 results in the 0.12 encoding."),
-    HIVE_ORC_DEFAULT_STRIPE_SIZE("hive.exec.orc.default.stripe.size",
-        64L * 1024 * 1024,
-        "Define the default ORC stripe size, in bytes."),
-    HIVE_ORC_DEFAULT_BLOCK_SIZE("hive.exec.orc.default.block.size", 256L * 1024 * 1024,
-        "Define the default file system block size for ORC files."),
-
-    HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD("hive.exec.orc.dictionary.key.size.threshold", 0.8f,
-        "If the number of keys in a dictionary is greater than this fraction of the total number of\n" +
-        "non-null rows, turn off dictionary encoding.  Use 1 to always use dictionary encoding."),
-    HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE("hive.exec.orc.default.row.index.stride", 10000,
-        "Define the default ORC index stride in number of rows. (Stride is the number of rows\n" +
-        "an index entry represents.)"),
-    HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK("hive.orc.row.index.stride.dictionary.check", true,
-        "If enabled dictionary check will happen after first row index stride (default 10000 rows)\n" +
-        "else dictionary check will happen before writing first stripe. In both cases, the decision\n" +
-        "to use dictionary or not will be retained thereafter."),
-    HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024,
-        "Define the default ORC buffer size, in bytes."),
-    HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true,
-        "Define the default block padding, which pads stripes to the HDFS block boundaries."),
-    HIVE_ORC_BLOCK_PADDING_TOLERANCE("hive.exec.orc.block.padding.tolerance", 0.05f,
-        "Define the tolerance for block padding as a decimal fraction of stripe size (for\n" +
-        "example, the default value 0.05 is 5% of the stripe size). For the defaults of 64Mb\n" +
-        "ORC stripe and 256Mb HDFS blocks, the default block padding tolerance of 5% will\n" +
-        "reserve a maximum of 3.2Mb for padding within the 256Mb block. In that case, if the\n" +
-        "available size within the block is more than 3.2Mb, a new smaller stripe will be\n" +
-        "inserted to fit within that space. This will make sure that no stripe written will\n" +
-        "cross block boundaries and cause remote reads within a node local task."),
-    HIVE_ORC_DEFAULT_COMPRESS("hive.exec.orc.default.compress", "ZLIB", "Define the default compression codec for ORC files"),
-
-    HIVE_ORC_ENCODING_STRATEGY("hive.exec.orc.encoding.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"),
-        "Define the encoding strategy to use while writing data. Changing this will\n" +
-        "only affect the light weight encoding for integers. This flag will not\n" +
-        "change the compression level of higher level compression codec (like ZLIB)."),
-
-    HIVE_ORC_COMPRESSION_STRATEGY("hive.exec.orc.compression.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"),
-         "Define the compression strategy to use while writing data. \n" +
-         "This changes the compression level of higher level compression codec (like ZLIB)."),
-
-    HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"),
-        "This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" +
-        " as opposed to query execution (split generation does not read or cache file footers)." +
-        " ETL strategy is used when spending little more time in split generation is acceptable" +
-        " (split generation reads and caches file footers). HYBRID chooses between the above strategies" +
-        " based on heuristics."),
-
-    HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
-        "Whether to enable using file metadata cache in metastore for ORC file footers."),
-
-    HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
-        "If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
-        "data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
-    HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
-        "Include file ID in splits on file systems thaty support it."),
-    HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
-        "Max cache size for keeping meta info about orc splits cached in the client."),
-    HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
-        "How many threads orc should use to create splits in parallel."),
-    HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false,
-        "If ORC reader encounters corrupt data, this value will be used to determine\n" +
-        "whether to skip the corrupt data or throw exception. The default behavior is to throw exception."),
-
-    HIVE_ORC_ZEROCOPY("hive.exec.orc.zerocopy", false,
-        "Use zerocopy reads with ORC. (This requires Hadoop 2.3 or later.)"),
-
-    HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
-        "LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
-        "'1', and '0' as extened, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
-        "The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
-        "boolean literal."),
-
-    HIVESKEWJOIN("hive.optimize.skewjoin", false,
-        "Whether to enable skew join optimization. \n" +
-        "The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
-        "processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
-        "job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
-        "the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
-        "map-join."),
-    HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false,
-        "Whether to enable dynamically partitioned hash join optimization. \n" +
-        "This setting is also dependent on enabling hive.auto.convert.join"),
-    HIVECONVERTJOIN("hive.auto.convert.join", true,
-        "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
-    HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
-        "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
-        "If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
-        "specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
-
-    HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
-        10000000L,
-        "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
-        "However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
-        "the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
-    HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false,
-        "For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
-        "filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
-        "Currently, this is not working with vectorization or tez execution engine."),
-    HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
-        "Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" +
-        "we think the key as a skew join key. "),
-    HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
-        "Determine the number of map task used in the follow up map join job for a skew join.\n" +
-        "It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
-    HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
-        "Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" +
-        "the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
-
-    HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
-        "Send a heartbeat after this interval - used by mapjoin and filter operators"),
-    HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
-        "When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
-    HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
-        "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
-    HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
-        "Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
-    HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
-        "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
-        "Insert queries are not restricted by this limit."),
-    HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f,
-        "The max memory to be used for hash in RS operator for top K selection."),
-    HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1,
-        "This controls how many partitions can be scanned for each partitioned table.\n" +
-        "The default value \"-1\" means no limit."),
-
-    HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 1.0f,
-        "Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" +
-        " of the number of keys is divided by this value. If the value is 0, statistics are not used" +
-        "and hive.hashtable.initialCapacity is used instead."),
-    HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
-        "mapjoin hashtable if statistics are absent, or if hive.hashtable.stats.key.estimate.adjustment is set to 0"),
-    HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
-    HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
-        "This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" +
-        "when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
-        "the local task will abort by itself. It means the data of the small table is too large to be held in memory."),
-    HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
-        "This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" +
-        "If the local task's memory usage is more than this number, the local task will abort by itself. \n" +
-        "It means the data of the small table is too large to be held in memory."),
-    HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
-        "The number means after how many rows processed it needs to check the memory usage"),
-
-    HIVEDEBUGLOCALTASK("hive.debug.localtask", false, ""),
-
-    HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
-        "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
-    HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat",
-        "The default input format for tez. Tez groups splits in the AM."),
-
-    HIVETEZCONTAINERSIZE("hive.tez.container.size", -1,
-        "By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."),
-    HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1,
-        "By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" +
-        "This can be used to overwrite."),
-    HIVETEZJAVAOPTS("hive.tez.java.opts", null,
-        "By default Tez will use the Java options from map tasks. This can be used to overwrite."),
-    HIVETEZLOGLEVEL("hive.tez.log.level", "INFO",
-        "The log level to use for tasks executing as part of the DAG.\n" +
-        "Used only if hive.tez.java.opts is used to configure Java options."),
-
-    HIVEENFORCEBUCKETING("hive.enforce.bucketing", false,
-        "Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced."),
-    HIVEENFORCESORTING("hive.enforce.sorting", false,
-        "Whether sorting is enforced. If true, while inserting into the table, sorting is enforced."),
-    HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
-        "If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing \n" +
-        "bucketing/sorting for queries of the form: \n" +
-        "insert overwrite table T2 select * from T1;\n" +
-        "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
-    HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
-    HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
-        "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
-    HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
-        "If the user asked for bucketed map-side join, and it cannot be performed, \n" +
-        "should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
-        "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
-        "query will fail if hive.enforce.bucketmapjoin is set to true."),
-
-    HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false,
-        "Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
-    HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
-        "hive.auto.convert.sortmerge.join.bigtable.selection.policy",
-        "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
-        "The policy to choose the big table for automatic conversion to sort-merge join. \n" +
-        "By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
-        ". based on position of the table - the leftmost table is selected\n" +
-        "org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
-        ". based on total size (all the partitions selected in the query) of the table \n" +
-        "org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoS

<TRUNCATED>

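A side note on the HiveConf hunk above: the description of
hive.exec.orc.block.padding.tolerance spells out arithmetic that is easy to
check. Below is a minimal, self-contained sketch of that padding-budget
calculation. It is illustrative only, not Hive's ORC writer code; the 10MB
remaining-space figure is a hypothetical stand-in:

public class OrcPaddingToleranceDemo {
  public static void main(String[] args) {
    long stripeSize = 64L * 1024 * 1024;   // hive.exec.orc.default.stripe.size
    double tolerance = 0.05;               // hive.exec.orc.block.padding.tolerance

    // The writer may spend at most tolerance * stripeSize bytes on padding
    // per HDFS block: 5% of 64MB is the 3.2MB quoted in the description.
    long maxPadding = (long) (tolerance * stripeSize);
    System.out.printf("padding budget per block: %.1f MB%n",
        maxPadding / (1024.0 * 1024.0));

    // If the space left in the 256MB block exceeds the budget, a smaller
    // stripe is written to fill it instead of padding across the boundary.
    long remainingInBlock = 10L * 1024 * 1024;  // hypothetical
    System.out.println(remainingInBlock <= maxPadding
        ? "pad to the block boundary"
        : "insert a smaller stripe to fit the remaining space");
  }
}

With the defaults, the budget works out to 3.2MB per 256MB block, matching the
figure quoted in the config description.
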
[54/55] [abbrv] hive git commit: HIVE-12344: Wrong types inferred for SemiJoin generation in CBO (Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)

Posted by jx...@apache.org.
HIVE-12344: Wrong types inferred for SemiJoin generation in CBO (Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/898834e5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/898834e5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/898834e5

Branch: refs/heads/master-fixed
Commit: 898834e53ac7a3b169372e8e2eb609ead1ec2f6c
Parents: eef89a2
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Nov 6 17:27:56 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Nov 6 17:27:56 2015 +0100

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/898834e5/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index e2f1cfb..90c2067 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -234,8 +234,8 @@ public class HiveCalciteUtil {
       leftKeys.add(origLeftInputSize + i);
       rightKeys.add(origRightInputSize + i);
       RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
-          rexBuilder.makeInputRef(newLeftFields.get(i).getType(), newLeftOffset + i),
-          rexBuilder.makeInputRef(newLeftFields.get(i).getType(), newRightOffset + i));
+          rexBuilder.makeInputRef(newLeftFields.get(origLeftInputSize + i).getType(), newLeftOffset + i),
+          rexBuilder.makeInputRef(newRightFields.get(origRightInputSize + i).getType(), newRightOffset + i));
       if (outJoinCond == null) {
         outJoinCond = cond;
       } else {


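The two-line change above is the whole fix: when HiveCalciteUtil builds the new
semijoin equality condition, the right-hand input reference must take its type
from the right input's field list at the right offset, rather than reusing the
left field list for both sides. A minimal sketch of why that matters, assuming
Apache Calcite on the classpath (the INTEGER/VARCHAR key types and the indexes
0 and 1 are hypothetical stand-ins, not actual planner state):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class SemiJoinTypeDemo {
  public static void main(String[] args) {
    RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    RelDataType leftKeyType =
        rexBuilder.getTypeFactory().createSqlType(SqlTypeName.INTEGER);
    RelDataType rightKeyType =
        rexBuilder.getTypeFactory().createSqlType(SqlTypeName.VARCHAR, 20);

    // Before the fix: both input refs were typed from the left field list,
    // so the right-hand ref (index 1) wrongly claimed the left key's type.
    RexNode buggy = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
        rexBuilder.makeInputRef(leftKeyType, 0),
        rexBuilder.makeInputRef(leftKeyType, 1));

    // After the fix: the right-hand ref is typed from the right field list.
    RexNode fixed = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
        rexBuilder.makeInputRef(leftKeyType, 0),
        rexBuilder.makeInputRef(rightKeyType, 1));

    System.out.println("buggy: " + buggy + "  fixed: " + fixed);
  }
}

With the buggy form, downstream rules see an equality whose right operand
reports the wrong type, which is how the wrong types came to be inferred for
the generated SemiJoin.
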
[03/55] [abbrv] hive git commit: HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)

Posted by jx...@apache.org.
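
For orientation before the generated-code diff below: the new
exchange_partitions call takes the same arguments as exchange_partition but
returns every exchanged partition. A minimal client-side sketch, assuming a
metastore listening on localhost:9083 and the standard Thrift Java runtime;
the database, table, and partition-spec values are hypothetical placeholders:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ExchangePartitionsExample {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("localhost", 9083);  // assumed endpoint
    transport.open();
    try {
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

      Map<String, String> partitionSpecs = new HashMap<String, String>();
      partitionSpecs.put("ds", "2015-11-06");  // hypothetical partition column/value

      // Exchanges the matching partitions between source_db.src_tbl and
      // dest_db.dst_tbl; unlike exchange_partition, the result lists all
      // exchanged partitions rather than a single one.
      List<Partition> exchanged = client.exchange_partitions(
          partitionSpecs, "source_db", "src_tbl", "dest_db", "dst_tbl");
      System.out.println("exchanged " + exchanged.size() + " partition(s)");
    } finally {
      transport.close();
    }
  }
}
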
http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 0c67416..3bc7e10 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -130,6 +130,8 @@ public class ThriftHiveMetastore {
 
     public Partition exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
 
+    public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
+
     public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
 
     public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
@@ -392,6 +394,8 @@ public class ThriftHiveMetastore {
 
     public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1937,6 +1941,45 @@ public class ThriftHiveMetastore {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result");
     }
 
+    public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException
+    {
+      send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
+      return recv_exchange_partitions();
+    }
+
+    public void send_exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws org.apache.thrift.TException
+    {
+      exchange_partitions_args args = new exchange_partitions_args();
+      args.setPartitionSpecs(partitionSpecs);
+      args.setSource_db(source_db);
+      args.setSource_table_name(source_table_name);
+      args.setDest_db(dest_db);
+      args.setDest_table_name(dest_table_name);
+      sendBase("exchange_partitions", args);
+    }
+
+    public List<Partition> recv_exchange_partitions() throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException
+    {
+      exchange_partitions_result result = new exchange_partitions_result();
+      receiveBase(result, "exchange_partitions");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      if (result.o4 != null) {
+        throw result.o4;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result");
+    }
+
     public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
       send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names);
@@ -5953,6 +5996,50 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public void exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      exchange_partitions_call method_call = new exchange_partitions_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class exchange_partitions_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private Map<String,String> partitionSpecs;
+      private String source_db;
+      private String source_table_name;
+      private String dest_db;
+      private String dest_table_name;
+      public exchange_partitions_call(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.partitionSpecs = partitionSpecs;
+        this.source_db = source_db;
+        this.source_table_name = source_table_name;
+        this.dest_db = dest_db;
+        this.dest_table_name = dest_table_name;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("exchange_partitions", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        exchange_partitions_args args = new exchange_partitions_args();
+        args.setPartitionSpecs(partitionSpecs);
+        args.setSource_db(source_db);
+        args.setSource_table_name(source_table_name);
+        args.setDest_db(dest_db);
+        args.setDest_table_name(dest_table_name);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<Partition> getResult() throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_exchange_partitions();
+      }
+    }
+
     public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_partition_with_auth_call method_call = new get_partition_with_auth_call(db_name, tbl_name, part_vals, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport);
@@ -8997,6 +9084,7 @@ public class ThriftHiveMetastore {
       processMap.put("drop_partitions_req", new drop_partitions_req());
       processMap.put("get_partition", new get_partition());
       processMap.put("exchange_partition", new exchange_partition());
+      processMap.put("exchange_partitions", new exchange_partitions());
       processMap.put("get_partition_with_auth", new get_partition_with_auth());
       processMap.put("get_partition_by_name", new get_partition_by_name());
       processMap.put("get_partitions", new get_partitions());
@@ -10271,6 +10359,36 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public static class exchange_partitions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, exchange_partitions_args> {
+      public exchange_partitions() {
+        super("exchange_partitions");
+      }
+
+      public exchange_partitions_args getEmptyArgsInstance() {
+        return new exchange_partitions_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public exchange_partitions_result getResult(I iface, exchange_partitions_args args) throws org.apache.thrift.TException {
+        exchange_partitions_result result = new exchange_partitions_result();
+        try {
+          result.success = iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        } catch (NoSuchObjectException o2) {
+          result.o2 = o2;
+        } catch (InvalidObjectException o3) {
+          result.o3 = o3;
+        } catch (InvalidInputException o4) {
+          result.o4 = o4;
+        }
+        return result;
+      }
+    }
+
     public static class get_partition_with_auth<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_partition_with_auth_args> {
       public get_partition_with_auth() {
         super("get_partition_with_auth");
@@ -12463,6 +12581,7 @@ public class ThriftHiveMetastore {
       processMap.put("drop_partitions_req", new drop_partitions_req());
       processMap.put("get_partition", new get_partition());
       processMap.put("exchange_partition", new exchange_partition());
+      processMap.put("exchange_partitions", new exchange_partitions());
       processMap.put("get_partition_with_auth", new get_partition_with_auth());
       processMap.put("get_partition_by_name", new get_partition_by_name());
       processMap.put("get_partitions", new get_partitions());
@@ -15361,20 +15480,20 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public static class get_partition_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_with_auth_args, Partition> {
-      public get_partition_with_auth() {
-        super("get_partition_with_auth");
+    public static class exchange_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, exchange_partitions_args, List<Partition>> {
+      public exchange_partitions() {
+        super("exchange_partitions");
       }
 
-      public get_partition_with_auth_args getEmptyArgsInstance() {
-        return new get_partition_with_auth_args();
+      public exchange_partitions_args getEmptyArgsInstance() {
+        return new exchange_partitions_args();
       }
 
-      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Partition>() { 
-          public void onComplete(Partition o) {
-            get_partition_with_auth_result result = new get_partition_with_auth_result();
+        return new AsyncMethodCallback<List<Partition>>() { 
+          public void onComplete(List<Partition> o) {
+            exchange_partitions_result result = new exchange_partitions_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15387,7 +15506,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partition_with_auth_result result = new get_partition_with_auth_result();
+            exchange_partitions_result result = new exchange_partitions_result();
             if (e instanceof MetaException) {
                         result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
@@ -15398,6 +15517,16 @@ public class ThriftHiveMetastore {
                         result.setO2IsSet(true);
                         msg = result;
             }
+            else             if (e instanceof InvalidObjectException) {
+                        result.o3 = (InvalidObjectException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof InvalidInputException) {
+                        result.o4 = (InvalidInputException) e;
+                        result.setO4IsSet(true);
+                        msg = result;
+            }
              else 
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -15418,25 +15547,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler);
+      public void start(I iface, exchange_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler);
       }
     }
 
-    public static class get_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_by_name_args, Partition> {
-      public get_partition_by_name() {
-        super("get_partition_by_name");
+    public static class get_partition_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_with_auth_args, Partition> {
+      public get_partition_with_auth() {
+        super("get_partition_with_auth");
       }
 
-      public get_partition_by_name_args getEmptyArgsInstance() {
-        return new get_partition_by_name_args();
+      public get_partition_with_auth_args getEmptyArgsInstance() {
+        return new get_partition_with_auth_args();
       }
 
       public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Partition>() { 
           public void onComplete(Partition o) {
-            get_partition_by_name_result result = new get_partition_by_name_result();
+            get_partition_with_auth_result result = new get_partition_with_auth_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15449,7 +15578,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partition_by_name_result result = new get_partition_by_name_result();
+            get_partition_with_auth_result result = new get_partition_with_auth_result();
             if (e instanceof MetaException) {
                         result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
@@ -15480,25 +15609,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+      public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler);
       }
     }
 
-    public static class get_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_args, List<Partition>> {
-      public get_partitions() {
-        super("get_partitions");
+    public static class get_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_by_name_args, Partition> {
+      public get_partition_by_name() {
+        super("get_partition_by_name");
       }
 
-      public get_partitions_args getEmptyArgsInstance() {
-        return new get_partitions_args();
+      public get_partition_by_name_args getEmptyArgsInstance() {
+        return new get_partition_by_name_args();
       }
 
-      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<Partition>>() { 
-          public void onComplete(List<Partition> o) {
-            get_partitions_result result = new get_partitions_result();
+        return new AsyncMethodCallback<Partition>() { 
+          public void onComplete(Partition o) {
+            get_partition_by_name_result result = new get_partition_by_name_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15511,14 +15640,14 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partitions_result result = new get_partitions_result();
-            if (e instanceof NoSuchObjectException) {
-                        result.o1 = (NoSuchObjectException) e;
+            get_partition_by_name_result result = new get_partition_by_name_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
+            else             if (e instanceof NoSuchObjectException) {
+                        result.o2 = (NoSuchObjectException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -15542,87 +15671,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
-        iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler);
+      public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
       }
     }
 
-    public static class get_partitions_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_with_auth_args, List<Partition>> {
-      public get_partitions_with_auth() {
-        super("get_partitions_with_auth");
+    public static class get_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_args, List<Partition>> {
+      public get_partitions() {
+        super("get_partitions");
       }
 
-      public get_partitions_with_auth_args getEmptyArgsInstance() {
-        return new get_partitions_with_auth_args();
+      public get_partitions_args getEmptyArgsInstance() {
+        return new get_partitions_args();
       }
 
       public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<List<Partition>>() { 
           public void onComplete(List<Partition> o) {
-            get_partitions_with_auth_result result = new get_partitions_with_auth_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            get_partitions_with_auth_result result = new get_partitions_with_auth_result();
-            if (e instanceof NoSuchObjectException) {
-                        result.o1 = (NoSuchObjectException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
-        iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler);
-      }
-    }
-
-    public static class get_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_pspec_args, List<PartitionSpec>> {
-      public get_partitions_pspec() {
-        super("get_partitions_pspec");
-      }
-
-      public get_partitions_pspec_args getEmptyArgsInstance() {
-        return new get_partitions_pspec_args();
-      }
-
-      public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<PartitionSpec>>() { 
-          public void onComplete(List<PartitionSpec> o) {
-            get_partitions_pspec_result result = new get_partitions_pspec_result();
+            get_partitions_result result = new get_partitions_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15635,7 +15702,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partitions_pspec_result result = new get_partitions_pspec_result();
+            get_partitions_result result = new get_partitions_result();
             if (e instanceof NoSuchObjectException) {
                         result.o1 = (NoSuchObjectException) e;
                         result.setO1IsSet(true);
@@ -15666,144 +15733,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
-        iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler);
-      }
-    }
-
-    public static class get_partition_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_args, List<String>> {
-      public get_partition_names() {
-        super("get_partition_names");
-      }
-
-      public get_partition_names_args getEmptyArgsInstance() {
-        return new get_partition_names_args();
-      }
-
-      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<String>>() { 
-          public void onComplete(List<String> o) {
-            get_partition_names_result result = new get_partition_names_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            get_partition_names_result result = new get_partition_names_result();
-            if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
-        iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler);
-      }
-    }
-
-    public static class get_partitions_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_args, List<Partition>> {
-      public get_partitions_ps() {
-        super("get_partitions_ps");
-      }
-
-      public get_partitions_ps_args getEmptyArgsInstance() {
-        return new get_partitions_ps_args();
-      }
-
-      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<Partition>>() { 
-          public void onComplete(List<Partition> o) {
-            get_partitions_ps_result result = new get_partitions_ps_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            get_partitions_ps_result result = new get_partitions_ps_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof NoSuchObjectException) {
-                        result.o2 = (NoSuchObjectException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
-        iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
+      public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler);
       }
     }
 
-    public static class get_partitions_ps_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_with_auth_args, List<Partition>> {
-      public get_partitions_ps_with_auth() {
-        super("get_partitions_ps_with_auth");
+    public static class get_partitions_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_with_auth_args, List<Partition>> {
+      public get_partitions_with_auth() {
+        super("get_partitions_with_auth");
       }
 
-      public get_partitions_ps_with_auth_args getEmptyArgsInstance() {
-        return new get_partitions_ps_with_auth_args();
+      public get_partitions_with_auth_args getEmptyArgsInstance() {
+        return new get_partitions_with_auth_args();
       }
 
       public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<List<Partition>>() { 
           public void onComplete(List<Partition> o) {
-            get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
+            get_partitions_with_auth_result result = new get_partitions_with_auth_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15816,7 +15764,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
+            get_partitions_with_auth_result result = new get_partitions_with_auth_result();
             if (e instanceof NoSuchObjectException) {
                         result.o1 = (NoSuchObjectException) e;
                         result.setO1IsSet(true);
@@ -15847,25 +15795,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
-        iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler);
+      public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler);
       }
     }
 
-    public static class get_partition_names_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_ps_args, List<String>> {
-      public get_partition_names_ps() {
-        super("get_partition_names_ps");
+    public static class get_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_pspec_args, List<PartitionSpec>> {
+      public get_partitions_pspec() {
+        super("get_partitions_pspec");
       }
 
-      public get_partition_names_ps_args getEmptyArgsInstance() {
-        return new get_partition_names_ps_args();
+      public get_partitions_pspec_args getEmptyArgsInstance() {
+        return new get_partitions_pspec_args();
       }
 
-      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<String>>() { 
-          public void onComplete(List<String> o) {
-            get_partition_names_ps_result result = new get_partition_names_ps_result();
+        return new AsyncMethodCallback<List<PartitionSpec>>() { 
+          public void onComplete(List<PartitionSpec> o) {
+            get_partitions_pspec_result result = new get_partitions_pspec_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15878,14 +15826,14 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partition_names_ps_result result = new get_partition_names_ps_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
+            get_partitions_pspec_result result = new get_partitions_pspec_result();
+            if (e instanceof NoSuchObjectException) {
+                        result.o1 = (NoSuchObjectException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof NoSuchObjectException) {
-                        result.o2 = (NoSuchObjectException) e;
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -15909,25 +15857,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
-        iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
+      public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
+        iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler);
       }
     }
 
-    public static class get_partitions_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_filter_args, List<Partition>> {
-      public get_partitions_by_filter() {
-        super("get_partitions_by_filter");
+    public static class get_partition_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_args, List<String>> {
+      public get_partition_names() {
+        super("get_partition_names");
       }
 
-      public get_partitions_by_filter_args getEmptyArgsInstance() {
-        return new get_partitions_by_filter_args();
+      public get_partition_names_args getEmptyArgsInstance() {
+        return new get_partition_names_args();
       }
 
-      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<Partition>>() { 
-          public void onComplete(List<Partition> o) {
-            get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+        return new AsyncMethodCallback<List<String>>() { 
+          public void onComplete(List<String> o) {
+            get_partition_names_result result = new get_partition_names_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15940,14 +15888,9 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+            get_partition_names_result result = new get_partition_names_result();
             if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof NoSuchObjectException) {
-                        result.o2 = (NoSuchObjectException) e;
+                        result.o2 = (MetaException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -15971,25 +15914,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
-        iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
+      public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+        iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler);
       }
     }
 
-    public static class get_part_specs_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_part_specs_by_filter_args, List<PartitionSpec>> {
-      public get_part_specs_by_filter() {
-        super("get_part_specs_by_filter");
+    public static class get_partitions_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_args, List<Partition>> {
+      public get_partitions_ps() {
+        super("get_partitions_ps");
       }
 
-      public get_part_specs_by_filter_args getEmptyArgsInstance() {
-        return new get_part_specs_by_filter_args();
+      public get_partitions_ps_args getEmptyArgsInstance() {
+        return new get_partitions_ps_args();
       }
 
-      public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<PartitionSpec>>() { 
-          public void onComplete(List<PartitionSpec> o) {
-            get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
+        return new AsyncMethodCallback<List<Partition>>() { 
+          public void onComplete(List<Partition> o) {
+            get_partitions_ps_result result = new get_partitions_ps_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16002,7 +15945,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
+            get_partitions_ps_result result = new get_partitions_ps_result();
             if (e instanceof MetaException) {
                         result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
@@ -16033,25 +15976,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
-        iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
+      public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
       }
     }
 
-    public static class get_partitions_by_expr<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_expr_args, PartitionsByExprResult> {
-      public get_partitions_by_expr() {
-        super("get_partitions_by_expr");
+    public static class get_partitions_ps_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_with_auth_args, List<Partition>> {
+      public get_partitions_ps_with_auth() {
+        super("get_partitions_ps_with_auth");
       }
 
-      public get_partitions_by_expr_args getEmptyArgsInstance() {
-        return new get_partitions_by_expr_args();
+      public get_partitions_ps_with_auth_args getEmptyArgsInstance() {
+        return new get_partitions_ps_with_auth_args();
       }
 
-      public AsyncMethodCallback<PartitionsByExprResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<PartitionsByExprResult>() { 
-          public void onComplete(PartitionsByExprResult o) {
-            get_partitions_by_expr_result result = new get_partitions_by_expr_result();
+        return new AsyncMethodCallback<List<Partition>>() { 
+          public void onComplete(List<Partition> o) {
+            get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16064,14 +16007,14 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partitions_by_expr_result result = new get_partitions_by_expr_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
+            get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
+            if (e instanceof NoSuchObjectException) {
+                        result.o1 = (NoSuchObjectException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof NoSuchObjectException) {
-                        result.o2 = (NoSuchObjectException) e;
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -16095,25 +16038,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback<PartitionsByExprResult> resultHandler) throws TException {
-        iface.get_partitions_by_expr(args.req,resultHandler);
+      public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler);
       }
     }
 
-    public static class get_partitions_by_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_names_args, List<Partition>> {
-      public get_partitions_by_names() {
-        super("get_partitions_by_names");
+    public static class get_partition_names_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_ps_args, List<String>> {
+      public get_partition_names_ps() {
+        super("get_partition_names_ps");
       }
 
-      public get_partitions_by_names_args getEmptyArgsInstance() {
-        return new get_partitions_by_names_args();
+      public get_partition_names_ps_args getEmptyArgsInstance() {
+        return new get_partition_names_ps_args();
       }
 
-      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<Partition>>() { 
-          public void onComplete(List<Partition> o) {
-            get_partitions_by_names_result result = new get_partitions_by_names_result();
+        return new AsyncMethodCallback<List<String>>() { 
+          public void onComplete(List<String> o) {
+            get_partition_names_ps_result result = new get_partition_names_ps_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16126,7 +16069,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_partitions_by_names_result result = new get_partitions_by_names_result();
+            get_partition_names_ps_result result = new get_partition_names_ps_result();
             if (e instanceof MetaException) {
                         result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
@@ -16157,25 +16100,26 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
-        iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler);
+      public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+        iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
       }
     }
 
-    public static class alter_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_args, Void> {
-      public alter_partition() {
-        super("alter_partition");
+    public static class get_partitions_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_filter_args, List<Partition>> {
+      public get_partitions_by_filter() {
+        super("get_partitions_by_filter");
       }
 
-      public alter_partition_args getEmptyArgsInstance() {
-        return new alter_partition_args();
+      public get_partitions_by_filter_args getEmptyArgsInstance() {
+        return new get_partitions_by_filter_args();
       }
 
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_partition_result result = new alter_partition_result();
+        return new AsyncMethodCallback<List<Partition>>() { 
+          public void onComplete(List<Partition> o) {
+            get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+            result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -16187,14 +16131,14 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            alter_partition_result result = new alter_partition_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
+            get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
+            else             if (e instanceof NoSuchObjectException) {
+                        result.o2 = (NoSuchObjectException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -16218,25 +16162,26 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, alter_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_partition(args.db_name, args.tbl_name, args.new_part,resultHandler);
+      public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
       }
     }
 
-    public static class alter_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_args, Void> {
-      public alter_partitions() {
-        super("alter_partitions");
+    public static class get_part_specs_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_part_specs_by_filter_args, List<PartitionSpec>> {
+      public get_part_specs_by_filter() {
+        super("get_part_specs_by_filter");
       }
 
-      public alter_partitions_args getEmptyArgsInstance() {
-        return new alter_partitions_args();
+      public get_part_specs_by_filter_args getEmptyArgsInstance() {
+        return new get_part_specs_by_filter_args();
       }
 
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_partitions_result result = new alter_partitions_result();
+        return new AsyncMethodCallback<List<PartitionSpec>>() { 
+          public void onComplete(List<PartitionSpec> o) {
+            get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
+            result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -16248,14 +16193,14 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            alter_partitions_result result = new alter_partitions_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
+            get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
+            else             if (e instanceof NoSuchObjectException) {
+                        result.o2 = (NoSuchObjectException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -16279,25 +16224,26 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, alter_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_partitions(args.db_name, args.tbl_name, args.new_parts,resultHandler);
+      public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
+        iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
       }
     }
 
-    public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
-      public alter_partition_with_environment_context() {
-        super("alter_partition_with_environment_context");
+    public static class get_partitions_by_expr<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_expr_args, PartitionsByExprResult> {
+      public get_partitions_by_expr() {
+        super("get_partitions_by_expr");
       }
 
-      public alter_partition_with_environment_context_args getEmptyArgsInstance() {
-        return new alter_partition_with_environment_context_args();
+      public get_partitions_by_expr_args getEmptyArgsInstance() {
+        return new get_partitions_by_expr_args();
       }
 
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<PartitionsByExprResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
+        return new AsyncMethodCallback<PartitionsByExprResult>() { 
+          public void onComplete(PartitionsByExprResult o) {
+            get_partitions_by_expr_result result = new get_partitions_by_expr_result();
+            result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -16309,14 +16255,14 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
+            get_partitions_by_expr_result result = new get_partitions_by_expr_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
+            else             if (e instanceof NoSuchObjectException) {
+                        result.o2 = (NoSuchObjectException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -16340,25 +16286,26 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, alter_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context,resultHandler);
+      public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback<PartitionsByExprResult> resultHandler) throws TException {
+        iface.get_partitions_by_expr(args.req,resultHandler);
       }
     }
 
-    public static class rename_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_args, Void> {
-      public rename_partition() {
-        super("rename_partition");
+    public static class get_partitions_by_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_names_args, List<Partition>> {
+      public get_partitions_by_names() {
+        super("get_partitions_by_names");
       }
 
-      public rename_partition_args getEmptyArgsInstance() {
-        return new rename_partition_args();
+      public get_partitions_by_names_args getEmptyArgsInstance() {
+        return new get_partitions_by_names_args();
       }
 
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            rename_partition_result result = new rename_partition_result();
+        return new AsyncMethodCallback<List<Partition>>() { 
+          public void onComplete(List<Partition> o) {
+            get_partitions_by_names_result result = new get_partitions_by_names_result();
+            result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -16370,14 +16317,258 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            rename_partition_result result = new rename_partition_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
+            get_partitions_by_names_result result = new get_partitions_by_names_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
+            else             if (e instanceof NoSuchObjectException) {
+                        result.o2 = (NoSuchObjectException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
+        iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler);
+      }
+    }
+
+    public static class alter_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_args, Void> {
+      public alter_partition() {
+        super("alter_partition");
+      }
+
+      public alter_partition_args getEmptyArgsInstance() {
+        return new alter_partition_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_partition_result result = new alter_partition_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            alter_partition_result result = new alter_partition_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, alter_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_partition(args.db_name, args.tbl_name, args.new_part,resultHandler);
+      }
+    }
+
+    public static class alter_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_args, Void> {
+      public alter_partitions() {
+        super("alter_partitions");
+      }
+
+      public alter_partitions_args getEmptyArgsInstance() {
+        return new alter_partitions_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_partitions_result result = new alter_partitions_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            alter_partitions_result result = new alter_partitions_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, alter_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_partitions(args.db_name, args.tbl_name, args.new_parts,resultHandler);
+      }
+    }
+
+    public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
+      public alter_partition_with_environment_context() {
+        super("alter_partition_with_environment_context");
+      }
+
+      public alter_partition_with_environment_context_args getEmptyArgsInstance() {
+        return new alter_partition_with_environment_context_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, alter_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context,resultHandler);
+      }
+    }
+
+    public static class rename_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_args, Void> {
+      public rename_partition() {
+        super("rename_partition");
+      }
+
+      public rename_partition_args getEmptyArgsInstance() {
+        return new rename_partition_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            rename_partition_result result = new rename_partition_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            rename_partition_result result = new rename_partition_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -67468,7 +67659,1554 @@ public class ThriftHiveMetastore {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_partition_result(");
+      StringBuilder sb = new StringBuilder("get_partition_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o2:");
+      if (this.o2 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o2);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+      if (success != null) {
+        success.validate();
+      }
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_partition_resultStandardSchemeFactory implements SchemeFactory {
+      public get_partition_resultStandardScheme getScheme() {
+        return new get_partition_resultStandardScheme();
+      }
+    }
+
+    private static class get_partition_resultStandardScheme extends StandardScheme<get_partition_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.success = new Partition();
+                struct.success.read(iprot);
+                struct.setSuccessIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 1: // O1
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o1 = new MetaException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // O2
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o2 = new NoSuchObjectException();
+                struct.o2.read(iprot);
+                struct.setO2IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          struct.success.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o1 != null) {
+          oprot.writeFieldBegin(O1_FIELD_DESC);
+          struct.o1.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o2 != null) {
+          oprot.writeFieldBegin(O2_FIELD_DESC);
+          struct.o2.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_partition_resultTupleSchemeFactory implements SchemeFactory {
+      public get_partition_resultTupleScheme getScheme() {
+        return new get_partition_resultTupleScheme();
+      }
+    }
+
+    private static class get_partition_resultTupleScheme extends TupleScheme<get_partition_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        if (struct.isSetO1()) {
+          optionals.set(1);
+        }
+        if (struct.isSetO2()) {
+          optionals.set(2);
+        }
+        oprot.writeBitSet(optionals, 3);
+        if (struct.isSetSuccess()) {
+          struct.success.write(oprot);
+        }
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+        if (struct.isSetO2()) {
+          struct.o2.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(3);
+        if (incoming.get(0)) {
+          struct.success = new Partition();
+          struct.success.read(iprot);
+          struct.setSuccessIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+        if (incoming.get(2)) {
+          struct.o2 = new NoSuchObjectException();
+          struct.o2.read(iprot);
+          struct.setO2IsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class exchange_partition_args implements org.apache.thrift.TBase<exchange_partition_args, exchange_partition_args._Fields>, java.io.Serializable, Cloneable, Comparable<exchange_partition_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("exchange_partition_args");
+
+    private static final org.apache.thrift.protocol.TField PARTITION_SPECS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionSpecs", org.apache.thrift.protocol.TType.MAP, (short)1);
+    private static final org.apache.thrift.protocol.TField SOURCE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("source_db", org.apache.thrift.protocol.TType.STRING, (short)2);
+    private static final org.apache.thrift.protocol.TField SOURCE_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("source_table_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+    private static final org.apache.thrift.protocol.TField DEST_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_db", org.apache.thrift.protocol.TType.STRING, (short)4);
+    private static final org.apache.thrift.protocol.TField DEST_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_table_name", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new exchange_partition_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new exchange_partition_argsTupleSchemeFactory());
+    }
+
+    private Map<String,String> partitionSpecs; // required
+    private String source_db; // required
+    private String source_table_name; // required
+    private String dest_db; // required
+    private String dest_table_name; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      PARTITION_SPECS((short)1, "partitionSpecs"),
+      SOURCE_DB((short)2, "source_db"),
+      SOURCE_TABLE_NAME((short)3, "source_table_name"),
+      DEST_DB((short)4, "dest_db"),
+      DEST_TABLE_NAME((short)5, "dest_table_name");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // PARTITION_SPECS
+            return PARTITION_SPECS;
+          case 2: // SOURCE_DB
+            return SOURCE_DB;
+          case 3: // SOURCE_TABLE_NAME
+            return SOURCE_TABLE_NAME;
+          case 4: // DEST_DB
+            return DEST_DB;
+          case 5: // DEST_TABLE_NAME
+            return DEST_TABLE_NAME;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.PARTITION_SPECS, new org.apache.thrift.meta_data.FieldMetaData("partitionSpecs", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+      tmpMap.put(_Fields.SOURCE_DB, new org.apache.thrift.meta_data.FieldMetaData("source_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.SOURCE_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("source_table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.DEST_DB, new org.apache.thrift.meta_data.FieldMetaData("dest_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.DEST_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("dest_table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exchange_partition_args.class, metaDataMap);
+    }
+
+    public exchange_partition_args() {
+    }
+
+    public exchange_partition_args(
+      Map<String,String> partitionSpecs,
+      String source_db,
+      String source_table_name,
+      String dest_db,
+      String dest_table_name)
+    {
+      this();
+      this.partitionSpecs = partitionSpecs;
+      this.source_db = source_db;
+      this.source_table_name = source_table_name;
+      this.dest_db = dest_db;
+      this.dest_table_name = dest_table_name;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public exchange_partition_args(exchange_partition_args other) {
+      if (other.isSetPartitionSpecs()) {
+        Map<String,String> __this__partitionSpecs = new HashMap<String,String>(other.partitionSpecs);
+        this.partitionSpecs = __this__partitionSpecs;
+      }
+      if (other.isSetSource_db()) {
+        this.source_db = other.source_db;
+      }
+      if (other.isSetSource_table_name()) {
+        this.source_table_name = other.source_table_name;
+      }
+      if (other.isSetDest_db()) {
+        this.dest_db = other.dest_db;
+      }
+      if (other.isSetDest_table_name()) {
+        this.dest_table_name = other.dest_table_name;
+      }
+    }
+
+    public exchange_partition_args deepCopy() {
+      return new exchange_partition_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.partitionSpecs = null;
+      this.source_db = null;
+      this.source_table_name = null;
+      this.dest_db = null;
+      this.dest_table_name = null;
+    }
+
+    public int getPartitionSpecsSize() {
+      return (this.partitionSpecs == null) ? 0 : this.partitionSpecs.size();
+    }
+
+    public void putToPartitionSpecs(String key, String val) {
+      if (this.partitionSpecs == null) {
+        this.partitionSpecs = new HashMap<String,String>();
+      }
+      this.partitionSpecs.put(key, val);
+    }
+
+    public Map<String,String> getPartitionSpecs() {
+      return this.partitionSpecs;
+    }
+
+    public void setPartitionSpecs(Map<String,String> partitionSpecs) {
+      this.partitionSpecs = partitionSpecs;
+    }
+
+    public void unsetPartitionSpecs() {
+      this.partitionSpecs = null;
+    }
+
+    /** Returns true if field partitionSpecs is set (has been assigned a value) and false otherwise */
+    public boolean isSetPartitionSpecs() {
+      return this.partitionSpecs != null;
+    }
+
+    public void setPartitionSpecsIsSet(boolean value) {
+      if (!value) {
+        this.partitionSpecs = null;
+      }
+    }
+
+    public String getSource_db() {
+      return this.source_db;
+    }
+
+    public void setSource_db(String source_db) {
+      this.source_db = source_db;
+    }
+
+    public void unsetSource_db() {
+      this.source_db = null;
+    }
+
+    /** Returns true if field source_db is set (has been assigned a value) and false otherwise */
+    public boolean isSetSource_db() {
+      return this.source_db != null;
+    }
+
+    public void setSource_dbIsSet(boolean value) {
+      if (!value) {
+        this.source_db = null;
+      }
+    }
+
+    public String getSource_table_name() {
+      return this.source_table_name;
+    }
+
+    public void setSource_table_name(String source_table_name) {
+      this.source_table_name = source_table_name;
+    }
+
+    public void unsetSource_table_name() {
+      this.source_table_name = null;
+    }
+
+    /** Returns true if field source_table_name is set (has been assigned a value) and false otherwise */
+    public boolean isSetSource_table_name() {
+      return this.source_table_name != null;
+    }
+
+    public void setSource_table_nameIsSet(boolean value) {
+      if (!value) {
+        this.source_table_name = null;
+      }
+    }
+
+    public String getDest_db() {
+      return this.dest_db;
+    }

<TRUNCATED>
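
For orientation, a minimal sketch of populating the generated argument struct, using the constructor and helpers shown above. The partition column/value and the import path (the struct is normally a nested class of the gen-javabean ThriftHiveMetastore) are illustrative assumptions, not part of the commit:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.exchange_partition_args;

    class ExchangePartitionArgsSketch {
      public static void main(String[] unused) {
        Map<String, String> partitionSpecs = new HashMap<String, String>();
        partitionSpecs.put("ds", "2015-11-06");  // hypothetical partition column and value
        exchange_partition_args args = new exchange_partition_args(
            partitionSpecs, "source_db", "source_table", "dest_db", "dest_table");
        System.out.println(args.isSetPartitionSpecs());   // true: field was assigned
        System.out.println(args.getPartitionSpecsSize()); // 1
      }
    }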

[46/55] [abbrv] hive git commit: HIVE-12340 : ExecDriver.execute() unnecessarily sets METASTOREPWD to HIVE (Hari Subramaniyan, reviewed by Sushanth Sowmyan)

Posted by jx...@apache.org.
HIVE-12340 : ExecDriver.execute() unnecessarily sets METASTOREPWD to HIVE (Hari Subramaniyan, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/81de8570
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/81de8570
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/81de8570

Branch: refs/heads/master-fixed
Commit: 81de85706c68ef395c758241fbaf88f02b0d1941
Parents: 4d2df79
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Thu Nov 5 11:47:59 2015 -0800
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Thu Nov 5 11:47:59 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java     | 10 ----------
 1 file changed, 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/81de8570/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 5cbf764..380cf08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -390,12 +390,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
         }
       }
 
-      // remove the pwd from conf file so that job tracker doesn't show this
-      // logs
-      String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
-      if (pwd != null) {
-        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
-      }
       JobClient jc = new JobClient(job);
       // make this client wait if job tracker is not behaving well.
       Throttle.checkJobTracker(job, LOG);
@@ -433,10 +427,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
       // Finally SUBMIT the JOB!
       rj = jc.submitJob(job);
-      // replace it back
-      if (pwd != null) {
-        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
-      }
 
       returnVal = jobExecHelper.progress(rj, jc, ctx.getHiveTxnManager());
       success = (returnVal == 0);
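
The user-visible effect of the removal, as a hedged sketch built only on the static HiveConf.getVar/setVar overloads used in the hunk above (the JobConf setup is illustrative): a metastore password placed in the job configuration is no longer rewritten to the literal "HIVE" around submitJob(). One reading of why the round trip is unnecessary is that METASTOREPWD already appears in hive.conf.hidden.list, visible in the HIVE-12346 hunk later in this thread.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.mapred.JobConf;

    class MetastorePwdSketch {
      public static void main(String[] unused) {
        JobConf job = new JobConf();
        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "secret");
        // With the mask/restore round trip removed, the value stays untouched
        // across ExecDriver.execute(); no temporary "HIVE" placeholder is set.
        System.out.println(HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD));
      }
    }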


[18/55] [abbrv] hive git commit: HIVE-12209: Vectorize simple UDFs with null arguments (Gopal V, reviewed by Sergey Shelukhin)

Posted by jx...@apache.org.
HIVE-12209: Vectorize simple UDFs with null arguments (Gopal V, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db2c5009
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db2c5009
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db2c5009

Branch: refs/heads/master-fixed
Commit: db2c5009b243aeb5be09225b03476d1c12ebef84
Parents: 492a10f
Author: Gopal V <go...@apache.org>
Authored: Mon Nov 2 19:42:35 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Mon Nov 2 19:42:35 2015 -0800

----------------------------------------------------------------------
 .../ql/exec/vector/VectorizationContext.java    |   7 +-
 .../ql/exec/vector/udf/VectorUDFArgDesc.java    |  19 ++--
 .../queries/clientpositive/vectorized_case.q    |  19 ++++
 .../clientpositive/spark/vectorized_case.q.out  | 109 +++++++++++++++++--
 .../clientpositive/tez/vectorized_case.q.out    | 109 +++++++++++++++++--
 .../clientpositive/vectorized_case.q.out        |  69 ++++++++++++
 6 files changed, 301 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 3489c9c..e7a829e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -2022,12 +2022,7 @@ public class VectorizationContext {
         variableArgPositions.add(i);
         argDescs[i].setVariable(getInputColumnIndex(((ExprNodeColumnDesc) child).getColumn()));
       } else if (child instanceof ExprNodeConstantDesc) {
-         if (((ExprNodeConstantDesc) child).getValue() == null) {
-           // cannot handle constant null at the moment
-           throw new HiveException("Unable to vectorize custom UDF. Custom udf containing "
-               + "constant null argument cannot be currently vectorized.");
-         }
-        // this is a constant
+        // this is a constant (or null)
         argDescs[i].setConstant((ExprNodeConstantDesc) child);
       } else {
         throw new HiveException("Unable to vectorize custom UDF. Encountered unsupported expr desc : "

http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
index e113980..6abfe63 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
@@ -59,13 +59,18 @@ public class VectorUDFArgDesc implements Serializable {
    * during initialization.
    */
   public void prepareConstant() {
-    PrimitiveCategory pc = ((PrimitiveTypeInfo) constExpr.getTypeInfo())
-        .getPrimitiveCategory();
-
-    // Convert from Java to Writable
-    Object writableValue = PrimitiveObjectInspectorFactory
-        .getPrimitiveJavaObjectInspector(pc).getPrimitiveWritableObject(
-          constExpr.getValue());
+    final Object writableValue;
+    if (constExpr != null) {
+      PrimitiveCategory pc = ((PrimitiveTypeInfo) constExpr.getTypeInfo())
+          .getPrimitiveCategory();
+
+      // Convert from Java to Writable
+      writableValue = PrimitiveObjectInspectorFactory
+          .getPrimitiveJavaObjectInspector(pc).getPrimitiveWritableObject(
+            constExpr.getValue());
+    } else {
+      writableValue = null;
+    }
 
     constObjVal = new GenericUDF.DeferredJavaObject(writableValue);
   }
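
In effect, a constant null argument is now wrapped as a deferred null instead of aborting vectorization. A minimal sketch of just that wrapping step (GenericUDF.DeferredJavaObject as referenced in the hunk above; the helper class is hypothetical):

    import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;

    class NullConstantSketch {
      static DeferredObject wrapNullConstant() {
        // a null literal has no ObjectInspector conversion to perform, so it is
        // handed to the UDF's evaluate() as a deferred null like any other argument
        return new DeferredJavaObject(null);
      }
    }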

http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/queries/clientpositive/vectorized_case.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_case.q b/ql/src/test/queries/clientpositive/vectorized_case.q
index 8799fbb..e74bf82 100644
--- a/ql/src/test/queries/clientpositive/vectorized_case.q
+++ b/ql/src/test/queries/clientpositive/vectorized_case.q
@@ -1,4 +1,5 @@
 set hive.explain.user=false;
+set hive.fetch.task.conversion=none;
 set hive.vectorized.execution.enabled = true
 ;
 explain
@@ -36,3 +37,21 @@ where csmallint = 418
 or csmallint = 12205
 or csmallint = 10583
 ;
+explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+;

http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
index c2250e6..ade9cfe 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
@@ -35,21 +35,40 @@ or csmallint = 12205
 or csmallint = 10583
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-0 is a root stage
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+                    Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        TableScan
-          alias: alltypesorc
-          Filter Operator
-            predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
-            Select Operator
-              expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              ListSink
+        ListSink
 
 PREHOOK: query: select 
   csmallint,
@@ -93,3 +112,75 @@ POSTHOOK: Input: default@alltypesorc
 10583	c	c
 418	a	a
 12205	b	b
+PREHOOK: query: explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+                    Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN (null) ELSE ('c') END (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
index c2250e6..136714d 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
@@ -35,21 +35,40 @@ or csmallint = 12205
 or csmallint = 10583
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-0 is a root stage
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        TableScan
-          alias: alltypesorc
-          Filter Operator
-            predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
-            Select Operator
-              expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              ListSink
+        ListSink
 
 PREHOOK: query: select 
   csmallint,
@@ -93,3 +112,75 @@ POSTHOOK: Input: default@alltypesorc
 10583	c	c
 418	a	a
 12205	b	b
+PREHOOK: query: explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN (null) ELSE ('c') END (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/results/clientpositive/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
index 73bf12d..347a93e 100644
--- a/ql/src/test/results/clientpositive/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
@@ -109,3 +109,72 @@ POSTHOOK: Input: default@alltypesorc
 10583	c	c
 418	a	a
 12205	b	b
+PREHOOK: query: explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 
+  csmallint,
+  case 
+    when csmallint = 418 then "a"
+    when csmallint = 12205 then "b"
+    else null
+  end,
+  case csmallint
+    when 418 then "a"
+    when 12205 then null
+    else "c"
+  end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN (null) ELSE ('c') END (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+


[48/55] [abbrv] hive git commit: HIVE-12164 : non-ascii characters show improperly with insert into (Aleksei Statkevich via Xuefu Zhang)

Posted by jx...@apache.org.
HIVE-12164 : non-ascii characters show improperly with insert into (Aleksei Statkevich via Xuefu Zhang)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e8c8a330
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e8c8a330
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e8c8a330

Branch: refs/heads/master-fixed
Commit: e8c8a33029da7e7ebef7ecc4af454b26912491bc
Parents: 9dae39c
Author: Aleksei Statkevich <me...@gmail.com>
Authored: Mon Oct 19 22:37:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Nov 5 13:54:53 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 16 ++++++++---
 .../clientpositive/insert_values_nonascii.q     |  9 +++++++
 .../clientpositive/insert_values_nonascii.q.out | 28 ++++++++++++++++++++
 3 files changed, 50 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e8c8a330/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f3d7057..f7e2039 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -216,6 +216,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -733,6 +734,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   /**
+   * Convert a string to Text format and write its bytes in the same way TextOutputFormat would do.
+   * This is needed to properly encode non-ascii characters.
+   */
+  private static void writeAsText(String text, FSDataOutputStream out) throws IOException {
+    Text to = new Text(text);
+    out.write(to.getBytes(), 0, to.getLength());
+  }
+
+  /**
    * Generate a temp table out of a value clause
    * See also {@link #preProcessForInsert(ASTNode, QB)}
    */
@@ -810,10 +820,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             fields.add(new FieldSchema("tmp_values_col" + nextColNum++, "string", ""));
           }
           if (isFirst) isFirst = false;
-          else out.writeBytes("\u0001");
-          out.writeBytes(unparseExprForValuesClause(value));
+          else writeAsText("\u0001", out);
+          writeAsText(unparseExprForValuesClause(value), out);
         }
-        out.writeBytes("\n");
+        writeAsText("\n", out);
         firstRow = false;
       }
       out.close();
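
Why the Text round trip matters, as a standalone sketch with no Hadoop dependency: DataOutputStream.writeBytes() keeps only the low-order byte of each char, so characters outside Latin-1 are silently corrupted, while encoding to UTF-8 first (which is what Text does internally) preserves them:

    import java.nio.charset.StandardCharsets;

    class WriteBytesSketch {
      public static void main(String[] unused) {
        String s = "Garçu 谢谢";
        // what writeBytes(s) would emit: the low byte of each UTF-16 code unit
        byte[] lowBytes = new byte[s.length()];
        for (int i = 0; i < s.length(); i++) {
          lowBytes[i] = (byte) s.charAt(i);
        }
        // what writeAsText() emits via Text: proper UTF-8
        byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
        System.out.println(new String(lowBytes, StandardCharsets.ISO_8859_1)); // CJK mangled
        System.out.println(new String(utf8, StandardCharsets.UTF_8));          // intact
      }
    }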

http://git-wip-us.apache.org/repos/asf/hive/blob/e8c8a330/ql/src/test/queries/clientpositive/insert_values_nonascii.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_nonascii.q b/ql/src/test/queries/clientpositive/insert_values_nonascii.q
new file mode 100644
index 0000000..2e4ef41
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_values_nonascii.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table insert_values_nonascii(t1 char(32), t2 string);
+
+insert into insert_values_nonascii values("Абвгде Garçu 谢谢",  "Kôkaku ありがとう"), ("ございます", "kidôtai한국어");
+
+select * from insert_values_nonascii;

http://git-wip-us.apache.org/repos/asf/hive/blob/e8c8a330/ql/src/test/results/clientpositive/insert_values_nonascii.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_values_nonascii.q.out b/ql/src/test/results/clientpositive/insert_values_nonascii.q.out
new file mode 100644
index 0000000..ca07bef
--- /dev/null
+++ b/ql/src/test/results/clientpositive/insert_values_nonascii.q.out
@@ -0,0 +1,28 @@
+PREHOOK: query: create table insert_values_nonascii(t1 char(32), t2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_values_nonascii
+POSTHOOK: query: create table insert_values_nonascii(t1 char(32), t2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_values_nonascii
+PREHOOK: query: insert into insert_values_nonascii values("Абвгде Garçu 谢谢",  "Kôkaku ありがとう"), ("ございます", "kidôtai한국어")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@insert_values_nonascii
+POSTHOOK: query: insert into insert_values_nonascii values("Абвгде Garçu 谢谢",  "Kôkaku ありがとう"), ("ございます", "kidôtai한국어")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@insert_values_nonascii
+POSTHOOK: Lineage: insert_values_nonascii.t1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_values_nonascii.t2 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select * from insert_values_nonascii
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_values_nonascii
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_values_nonascii
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_values_nonascii
+#### A masked pattern was here ####
+Абвгде Garçu 谢谢                 	Kôkaku ありがとう
+ございます                           	kidôtai한국어


[53/55] [abbrv] hive git commit: HIVE-12346: Internally used variables in HiveConf should not be settable via command (Chaoyu Tang, reviewed by Xuefu Zhang)

Posted by jx...@apache.org.
HIVE-12346: Internally used variables in HiveConf should not be settable via command (Chaoyu Tang, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eef89a21
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eef89a21
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eef89a21

Branch: refs/heads/master-fixed
Commit: eef89a2105a6ec401d18f9f1de2912a89c8eb4ac
Parents: 3bf280f
Author: ctang <ct...@gmail.com>
Authored: Fri Nov 6 08:51:41 2015 -0500
Committer: ctang <ct...@gmail.com>
Committed: Fri Nov 6 08:51:53 2015 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/conf/HiveConf.java   | 15 ++++++++++++++-
 .../clientnegative/set_hiveconf_internal_variable0.q |  4 ++++
 .../clientnegative/set_hiveconf_internal_variable1.q |  4 ++++
 .../set_hiveconf_internal_variable0.q.out            | 11 +++++++++++
 .../set_hiveconf_internal_variable1.q.out            | 11 +++++++++++
 5 files changed, 44 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eef89a21/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 98f9206..12276bf 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2104,6 +2104,10 @@ public class HiveConf extends Configuration {
         METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname,
         "Comma separated list of configuration options which should not be read by normal user like passwords"),
 
+    HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
+        "hive.added.files.path,hive.added.jars.path,hive.added.archives.path",
+        "Comma separated list of variables which are used internally and should not be configurable."),
+
     // If this is set all move tasks at the end of a multi-insert query will only begin once all
     // outputs are ready
     HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
@@ -2634,7 +2638,7 @@ public class HiveConf extends Configuration {
     }
     if (restrictList.contains(name)) {
       throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
-          + "of parameters that can't be modified at runtime");
+          + " of parameters that can't be modified at runtime");
     }
     String oldValue = name != null ? get(name) : null;
     if (name == null || value == null || !value.equals(oldValue)) {
@@ -3329,9 +3333,18 @@ public class HiveConf extends Configuration {
         restrictList.add(entry.trim());
       }
     }
+
+    String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST);
+    if (internalVariableListStr != null) {
+      for (String entry : internalVariableListStr.split(",")) {
+        restrictList.add(entry.trim());
+      }
+    }
+
     restrictList.add(ConfVars.HIVE_IN_TEST.varname);
     restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
     restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname);
+    restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname);
   }
 
   private void setupHiddenSet() {

http://git-wip-us.apache.org/repos/asf/hive/blob/eef89a21/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q
new file mode 100644
index 0000000..b6393e4
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable0.q
@@ -0,0 +1,4 @@
+-- should fail: for some internal variables which should not be settable via set command
+desc src;
+
+set hive.added.jars.path=file://rootdir/test/added/a.jar;

http://git-wip-us.apache.org/repos/asf/hive/blob/eef89a21/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q
new file mode 100644
index 0000000..0038f36
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/set_hiveconf_internal_variable1.q
@@ -0,0 +1,4 @@
+-- should fail: hive.conf.internal.variable.list is in restricted list
+desc src;
+
+set hive.conf.internal.variable.list=;

http://git-wip-us.apache.org/repos/asf/hive/blob/eef89a21/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out
new file mode 100644
index 0000000..61dafb4
--- /dev/null
+++ b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: -- should fail: for some internal variables which should not be settable via set command
+desc src
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src
+POSTHOOK: query: -- should fail: for some internal variables which should not be settable via set command
+desc src
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src
+key                 	string              	default             
+value               	string              	default             
+Query returned non-zero code: 1, cause: Cannot modify hive.added.jars.path at runtime. It is in the list of parameters that can't be modified at runtime

http://git-wip-us.apache.org/repos/asf/hive/blob/eef89a21/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out
new file mode 100644
index 0000000..ae2dafb
--- /dev/null
+++ b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: -- should fail: hive.conf.internal.variable.list is in restricted list
+desc src
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src
+POSTHOOK: query: -- should fail: hive.conf.internal.variable.list is in restricted list
+desc src
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src
+key                 	string              	default             
+value               	string              	default             
+Query returned non-zero code: 1, cause: Cannot modify hive.conf.internal.variable.list at runtime. It is in the list of parameters that can't be modified at runtime
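
The mechanism behind both failures above, condensed into a hedged standalone sketch (hypothetical class, not Hive's actual HiveConf): the internal-variable list is split and trimmed into the restrict list at setup time, and every set goes through a contains() check:

    import java.util.HashSet;
    import java.util.Set;

    class RestrictedConfSketch {
      private final Set<String> restrictList = new HashSet<String>();

      RestrictedConfSketch(String internalVariableListStr) {
        if (internalVariableListStr != null) {
          for (String entry : internalVariableListStr.split(",")) {
            restrictList.add(entry.trim());
          }
        }
        // the list itself must not be editable, or the guard could be bypassed
        restrictList.add("hive.conf.internal.variable.list");
      }

      void verifyAndSet(String name, String value) {
        if (restrictList.contains(name)) {
          throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
              + " of parameters that can't be modified at runtime");
        }
        // ... otherwise apply the value
      }
    }

Calling verifyAndSet("hive.added.jars.path", "x") on an instance constructed with "hive.added.files.path,hive.added.jars.path,hive.added.archives.path" throws with the same message the negative tests expect.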


[10/55] [abbrv] hive git commit: HIVE-12292 : revert the if removal from HIVE-12237 (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12292 : revert the if removal from HIVE-12237 (Sergey Shelukhin, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6fda3b55
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6fda3b55
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6fda3b55

Branch: refs/heads/master-fixed
Commit: 6fda3b55e9ae680f47c55395f90be762285f2760
Parents: c9246f4
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Nov 2 13:03:01 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Nov 2 13:03:01 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/cli/CliDriver.java   |  4 +-
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |  9 ++--
 .../hive/llap/cache/LowLevelCacheImpl.java      |  5 +-
 .../llap/cache/LowLevelCacheMemoryManager.java  |  5 +-
 .../llap/cache/LowLevelFifoCachePolicy.java     |  4 +-
 .../llap/cache/LowLevelLrfuCachePolicy.java     |  7 +--
 .../hive/llap/io/api/impl/LlapIoImpl.java       | 17 +++++--
 .../llap/io/decode/OrcColumnVectorProducer.java |  9 ++--
 .../llap/io/encoded/OrcEncodedDataReader.java   |  7 +--
 .../org/apache/hadoop/hive/llap/LogLevels.java  | 53 ++++++++++++++++++++
 10 files changed, 95 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
----------------------------------------------------------------------
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index 82d064d..b359850 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -107,7 +107,9 @@ public class CliDriver {
     SessionState ss = SessionState.get();
     conf = (ss != null) ? ss.getConf() : new Configuration();
     Logger LOG = LoggerFactory.getLogger("CliDriver");
-    LOG.debug("CliDriver inited with classpath {}", System.getProperty("java.class.path"));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("CliDriver inited with classpath {}", System.getProperty("java.class.path"));
+    }
     console = new LogHelper(LOG);
     originalThreadName = Thread.currentThread().getName();
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index f69ac5b..2aca68d 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -48,10 +48,11 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
     maxAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_ALLOC);
     arenaSize = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_ARENA_SIZE);
     long maxSizeVal = HiveConf.getLongVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_SIZE);
-    LlapIoImpl.LOG.info("Buddy allocator with {}", (isDirect ? "direct" : "byte")
-        , " buffers; allocation sizes {} ", minAllocation, " - {}", maxAllocation
-        , ", arena size {}", arenaSize, ". total size {}", maxSizeVal);
-
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+      LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte")
+          + " buffers; allocation sizes " + minAllocation + " - " + maxAllocation
+          + ", arena size " + arenaSize + ". total size " + maxSizeVal);
+    }
 
     if (minAllocation < 8) {
       throw new AssertionError("Min allocation must be at least 8: " + minAllocation);

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
index e7b8f1a..c2a130a 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
@@ -58,8 +58,9 @@ public class LowLevelCacheImpl implements LowLevelCache, LlapOomDebugDump {
   @VisibleForTesting
   LowLevelCacheImpl(LlapDaemonCacheMetrics metrics, LowLevelCachePolicy cachePolicy,
       EvictionAwareAllocator allocator, boolean doAssumeGranularBlocks, long cleanupInterval) {
-      LlapIoImpl.LOG.info("Low level cache; cleanup interval {}", cleanupInterval, "sec");
-
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+      LlapIoImpl.LOG.info("Low level cache; cleanup interval " + cleanupInterval + "sec");
+    }
     this.cachePolicy = cachePolicy;
     this.allocator = allocator;
     this.cleanupInterval = cleanupInterval;

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
index 8a39e35..4a256ee 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
@@ -44,8 +44,9 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
     this.usedMemory = new AtomicLong(0);
     this.metrics = metrics;
     metrics.incrCacheCapacityTotal(maxSize);
-    LlapIoImpl.LOG.info("Cache memory manager initialized with max size {}", maxSize);
-
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+      LlapIoImpl.LOG.info("Cache memory manager initialized with max size " + maxSize);
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
index 0838682..1430eae 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
@@ -35,7 +35,9 @@ public class LowLevelFifoCachePolicy implements LowLevelCachePolicy {
   private LlapOomDebugDump parentDebugDump;
 
   public LowLevelFifoCachePolicy(Configuration conf) {
-    LlapIoImpl.LOG.info("FIFO cache policy");
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+      LlapIoImpl.LOG.info("FIFO cache policy");
+    }
     buffers = new LinkedList<LlapCacheableBuffer>();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
index 49e1b59..76e7605 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
@@ -78,9 +78,10 @@ public class LowLevelLrfuCachePolicy implements LowLevelCachePolicy {
       int lrfuThreshold = (int)((Math.log(1 - Math.pow(0.5, lambda)) / Math.log(0.5)) / lambda);
       maxHeapSize = Math.min(lrfuThreshold, maxBuffers);
     }
-    LlapIoImpl.LOG.info("LRFU cache policy with min buffer size {}", minBufferSize
-        , " and lambda {}", lambda, " (heap size {} ", maxHeapSize + ")");
-
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+      LlapIoImpl.LOG.info("LRFU cache policy with min buffer size " + minBufferSize
+          + " and lambda " + lambda + " (heap size " + maxHeapSize + ")");
+    }
 
     heap = new LlapCacheableBuffer[maxHeapSize];
     listHead = listTail = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index 83a88f5..b38f472 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.llap.io.api.impl;
 
+import org.apache.hadoop.hive.llap.LogLevels;
+
 import java.io.IOException;
 import java.util.concurrent.Executors;
 
@@ -56,19 +58,21 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
   public static final Logger LOG = LoggerFactory.getLogger(LlapIoImpl.class);
+  public static final LogLevels LOGL = new LogLevels(LOG);
 
   private final ColumnVectorProducer cvp;
   private final ListeningExecutorService executor;
-  private final LlapDaemonCacheMetrics cacheMetrics;
-  private final LlapDaemonQueueMetrics queueMetrics;
+  private LlapDaemonCacheMetrics cacheMetrics;
+  private LlapDaemonQueueMetrics queueMetrics;
   private ObjectName buddyAllocatorMXBean;
   private EvictionAwareAllocator allocator;
 
   private LlapIoImpl(Configuration conf) throws IOException {
     boolean useLowLevelCache = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_LOW_LEVEL_CACHE);
     // High-level cache not supported yet.
-    LOG.info("Initializing LLAP IO {}", useLowLevelCache ? " with low level cache" : "");
-
+    if (LOGL.isInfoEnabled()) {
+      LOG.info("Initializing LLAP IO" + (useLowLevelCache ? " with low level cache" : ""));
+    }
 
     String displayName = "LlapDaemonCacheMetrics-" + MetricsUtils.getHostName();
     String sessionId = conf.get("llap.daemon.metrics.sessionid");
@@ -111,7 +115,10 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     // TODO: this should depends on input format and be in a map, or something.
     this.cvp = new OrcColumnVectorProducer(metadataCache, orcCache, cache, conf, cacheMetrics,
         queueMetrics);
-    LOG.info("LLAP IO initialized");
+    if (LOGL.isInfoEnabled()) {
+      LOG.info("LLAP IO initialized");
+    }
+
     registerMXBeans();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
index 38c31d3..259c483 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
@@ -43,15 +43,16 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
   private final Cache<OrcCacheKey> cache;
   private final LowLevelCache lowLevelCache;
   private final Configuration conf;
-  private final boolean _skipCorrupt; // TODO: get rid of this
-  private final LlapDaemonCacheMetrics cacheMetrics;
-  private final LlapDaemonQueueMetrics queueMetrics;
+  private boolean _skipCorrupt; // TODO: get rid of this
+  private LlapDaemonCacheMetrics cacheMetrics;
+  private LlapDaemonQueueMetrics queueMetrics;
 
   public OrcColumnVectorProducer(OrcMetadataCache metadataCache,
       LowLevelCacheImpl lowLevelCache, Cache<OrcCacheKey> cache, Configuration conf,
       LlapDaemonCacheMetrics metrics, LlapDaemonQueueMetrics queueMetrics) {
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
       LlapIoImpl.LOG.info("Initializing ORC column vector producer");
-
+    }
 
     this.metadataCache = metadataCache;
     this.lowLevelCache = lowLevelCache;

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index e625490..9bdafc9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -198,8 +198,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
 
   protected Void performDataRead() throws IOException {
     long startTime = counters.startTimeCounter();
-    LlapIoImpl.LOG.info("Processing data for {}", split.getPath());
-
+    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+      LlapIoImpl.LOG.info("Processing data for " + split.getPath());
+    }
     if (processStop()) {
       recordReaderTime(startTime);
       return null;
@@ -744,7 +745,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     long offset = split.getStart(), maxOffset = offset + split.getLength();
     stripeIxFrom = -1;
     int stripeIxTo = -1;
-    if (LlapIoImpl.LOG.isDebugEnabled()) {
+    if (LlapIoImpl.LOGL.isDebugEnabled()) {
       String tmp = "FileSplit {" + split.getStart() + ", " + split.getLength() + "}; stripes ";
       for (StripeInformation stripe : stripes) {
         tmp += "{" + stripe.getOffset() + ", " + stripe.getLength() + "}, ";

http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java b/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java
new file mode 100644
index 0000000..300230f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap;
+
+import org.slf4j.Logger;
+
+public class LogLevels {
+  private final boolean isT, isD, isI, isW, isE;
+
+  public LogLevels(Logger log) {
+    isT = log.isTraceEnabled();
+    isD = log.isDebugEnabled();
+    isI = log.isInfoEnabled();
+    isW = log.isWarnEnabled();
+    isE = log.isErrorEnabled();
+  }
+
+  public boolean isTraceEnabled() {
+    return isT;
+  }
+
+  public boolean isDebugEnabled() {
+    return isD;
+  }
+
+  public boolean isInfoEnabled() {
+    return isI;
+  }
+
+  public boolean isWarnEnabled() {
+    return isW;
+  }
+
+  public boolean isErrorEnabled() {
+    return isE;
+  }
+}
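
A minimal usage sketch of the new helper, mirroring the call sites changed above (the Example class itself is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.hadoop.hive.llap.LogLevels;

    class Example {
      private static final Logger LOG = LoggerFactory.getLogger(Example.class);
      private static final LogLevels LOGL = new LogLevels(LOG); // levels sampled once, up front

      void init(long maxSize) {
        if (LOGL.isInfoEnabled()) {
          // the guard keeps the string concatenation off the path when INFO is disabled
          LOG.info("Cache memory manager initialized with max size " + maxSize);
        }
      }
    }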


[27/55] [abbrv] hive git commit: HIVE-12305: CBO: Calcite Operator To Hive Operator (Calcite Return Path): UDAF can not pull up constant expressions (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12305: CBO: Calcite Operator To Hive Operator (Calcite Return Path): UDAF can not pull up constant expressions (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7073ce32
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7073ce32
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7073ce32

Branch: refs/heads/master-fixed
Commit: 7073ce3244f62ff8c41ae83dbafb4465ad0567e9
Parents: 6237615
Author: Pengcheng Xiong <px...@apache.org>
Authored: Tue Nov 3 11:24:58 2015 -0800
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Tue Nov 3 11:24:58 2015 -0800

----------------------------------------------------------------------
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |    5 +-
 .../cbo_rp_annotate_stats_groupby.q             |  141 ++
 .../cbo_rp_annotate_stats_groupby.q.out         | 1301 ++++++++++++++++++
 3 files changed, 1446 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7073ce32/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 1cccc77..e2f1cfb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -714,7 +714,10 @@ public class HiveCalciteUtil {
     ExprNodeConverter exprConv = new ExprNodeConverter(inputTabAlias, inputRel.getRowType(),
         new HashSet<Integer>(), inputRel.getCluster().getTypeFactory());
     for (int index = 0; index < rexInputRefs.size(); index++) {
-      if (exprs.get(index) instanceof RexLiteral) {
+      // The following check is only a guard against failures.
+      // TODO: Knowing which expr is constant in GBY's aggregation function
+      // arguments could be better done using Metadata provider of Calcite.
+      if (exprs != null && index < exprs.size() && exprs.get(index) instanceof RexLiteral) {
         ExprNodeDesc exprNodeDesc = exprConv.visitLiteral((RexLiteral) exprs.get(index));
         exprNodes.add(exprNodeDesc);
       } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/7073ce32/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
new file mode 100644
index 0000000..4d2cac9
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
@@ -0,0 +1,141 @@
+set hive.cbo.returnpath.hiveop=true;
+set hive.stats.fetch.column.stats=true;
+set hive.map.aggr.hash.percentmemory=0.0f;
+
+-- hash aggregation is disabled
+
+-- There are different cases for Group By depending on map/reduce side, hash aggregation,
+-- grouping sets and column stats. If we don't have column stats, we just assume hash
+-- aggregation is disabled. Following are the possible cases and rules for cardinality
+-- estimation
+
+-- MAP SIDE:
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism)
+-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+
+-- REDUCE SIDE:
+-- Case 7: NO column stats - numRows / 2
+-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
+-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
+
+create table if not exists loc_staging (
+  state string,
+  locid int,
+  zip bigint,
+  year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+
+insert overwrite table loc_orc select * from loc_staging;
+
+-- numRows: 8 rawDataSize: 796
+explain select * from loc_orc;
+
+-- partial column stats
+analyze table loc_orc compute statistics for columns state;
+
+-- inner group by: map - numRows: 8 reduce - numRows: 4
+-- outer group by: map - numRows: 4 reduce numRows: 2
+explain select a, c, min(b)
+from ( select state as a, locid as b, count(*) as c
+       from loc_orc
+       group by state,locid
+     ) sq1
+group by a,c;
+
+analyze table loc_orc compute statistics for columns state,locid,year;
+
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select year from loc_orc group by year;
+
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 8
+explain select state,locid from loc_orc group by state,locid;
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
+explain select state,locid from loc_orc group by state,locid with cube;
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
+explain select state,locid from loc_orc group by state,locid with rollup;
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 8: column stats, grouping sets - cardinality = 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state));
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+
+set hive.map.aggr.hash.percentmemory=0.5f;
+set mapred.max.split.size=80;
+-- map-side parallelism will be 10 (data size 796 / max split size 80, rounded up)
+
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select year from loc_orc group by year;
+
+-- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube;
+
+-- ndvProduct becomes 0 as zip does not have column stats
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state,zip from loc_orc group by state,zip;
+
+set mapred.max.split.size=1000;
+set hive.stats.fetch.column.stats=false;
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube;
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
+explain select state,locid from loc_orc group by state,locid with rollup;
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
+explain select state,locid from loc_orc group by state,locid grouping sets((state));
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 7: NO column stats - cardinality = 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+
+set mapred.max.split.size=80;
+
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
+explain select year from loc_orc group by year;
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube;
+

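To see where the annotated cardinalities in cbo_rp_annotate_stats_groupby.q come from, the sketch below works the Case 3 / Case 9 arithmetic for the "group by year" query at split size 80. The class and method names are illustrative only; numRows = 8 and parallelism = 10 are taken from the comments above, and ndv(year) = 2 is the value implied by the "Num rows: 2" reduce-side plan in the golden output that follows.

// Worked sketch of two of the rules listed in the .q header, plugged
// with this test's numbers.
public class GroupByCardinalitySketch {

  // Map side, Case 3: column stats, hash aggregation, NO grouping sets.
  static long mapSideCase3(long numRows, long ndvProduct, long parallelism) {
    return Math.min(numRows / 2, ndvProduct * parallelism);
  }

  // Reduce side, Case 9: column stats, NO grouping sets.
  static long reduceSideCase9(long numRows, long ndvProduct) {
    return Math.min(numRows, ndvProduct);
  }

  public static void main(String[] args) {
    long map = mapSideCase3(8, 2, 10);      // min(8 / 2, 2 * 10) = 4
    long reduce = reduceSideCase9(map, 2);  // min(4, 2) = 2
    System.out.println(map + " -> " + reduce); // matches "cardinality = 4" then "cardinality = 2"
  }
}

The grouping-set figures fall out of the same formulas: with cube the grouping-set size is 4, so Case 6 yields 8 * 4 = 32 and Case 4 yields Min((8 * 4) / 2, ...) = 16, matching the annotations above.
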
http://git-wip-us.apache.org/repos/asf/hive/blob/7073ce32/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
new file mode 100644
index 0000000..b47a3b3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
@@ -0,0 +1,1301 @@
+PREHOOK: query: -- hash aggregation is disabled
+
+-- There are different cases for Group By depending on map/reduce side, hash aggregation,
+-- grouping sets and column stats. If we don't have column stats, we just assume hash
+-- aggregation is disabled. Following are the possible cases and rules for cardinality
+-- estimation
+
+-- MAP SIDE:
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism)
+-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+
+-- REDUCE SIDE:
+-- Case 7: NO column stats - numRows / 2
+-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
+-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
+
+create table if not exists loc_staging (
+  state string,
+  locid int,
+  zip bigint,
+  year int
+) row format delimited fields terminated by '|' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@loc_staging
+POSTHOOK: query: -- hash aggregation is disabled
+
+-- There are different cases for Group By depending on map/reduce side, hash aggregation,
+-- grouping sets and column stats. If we don't have column stats, we just assume hash
+-- aggregation is disabled. Following are the possible cases and rules for cardinality
+-- estimation
+
+-- MAP SIDE:
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism)
+-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+
+-- REDUCE SIDE:
+-- Case 7: NO column stats - numRows / 2
+-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
+-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
+
+create table if not exists loc_staging (
+  state string,
+  locid int,
+  zip bigint,
+  year int
+) row format delimited fields terminated by '|' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@loc_staging
+PREHOOK: query: create table loc_orc like loc_staging
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@loc_orc
+POSTHOOK: query: create table loc_orc like loc_staging
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@loc_orc
+PREHOOK: query: alter table loc_orc set fileformat orc
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
+POSTHOOK: query: alter table loc_orc set fileformat orc
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
+PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@loc_staging
+POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@loc_staging
+PREHOOK: query: insert overwrite table loc_orc select * from loc_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_staging
+PREHOOK: Output: default@loc_orc
+POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_staging
+POSTHOOK: Output: default@loc_orc
+POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: -- numRows: 8 rawDataSize: 796
+explain select * from loc_orc
+PREHOOK: type: QUERY
+POSTHOOK: query: -- numRows: 8 rawDataSize: 796
+explain select * from loc_orc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: loc_orc
+          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
+            outputColumnNames: state, locid, zip, year
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- partial column stats
+analyze table loc_orc compute statistics for columns state
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc
+#### A masked pattern was here ####
+POSTHOOK: query: -- partial column stats
+analyze table loc_orc compute statistics for columns state
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc
+#### A masked pattern was here ####
+PREHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4
+-- outer group by: map - numRows: 4 reduce numRows: 2
+explain select a, c, min(b)
+from ( select state as a, locid as b, count(*) as c
+       from loc_orc
+       group by state,locid
+     ) sq1
+group by a,c
+PREHOOK: type: QUERY
+POSTHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4
+-- outer group by: map - numRows: 4 reduce numRows: 2
+explain select a, c, min(b)
+from ( select state as a, locid as b, count(*) as c
+       from loc_orc
+       group by state,locid
+     ) sq1
+group by a,c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: sq1:loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
+              Group By Operator
+                aggregations: count()
+                keys: state (type: string), locid (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 8 Data size: 752 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 8 Data size: 752 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col2 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid, $f2
+          Statistics: Num rows: 7 Data size: 658 Basic stats: COMPLETE Column stats: PARTIAL
+          Group By Operator
+            aggregations: min(locid)
+            keys: state (type: string), $f2 (type: bigint)
+            mode: hash
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), _col1 (type: bigint)
+              sort order: ++
+              Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
+              Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
+              value expressions: _col2 (type: int)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0)
+          keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
+          mode: mergepartial
+          outputColumnNames: state, $f2, $f2_0
+          Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc
+#### A masked pattern was here ####
+PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select year from loc_orc group by year
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select year from loc_orc group by year
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: year (type: int)
+              outputColumnNames: year
+              Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: year (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: year
+          Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 8
+explain select state,locid from loc_orc group by state,locid
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 8
+explain select state,locid from loc_orc group by state,locid
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
+explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
+explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
+explain select state,locid from loc_orc group by state,locid with rollup
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
+explain select state,locid from loc_orc group by state,locid with rollup
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 8: column stats, grouping sets - cardinality = 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 8: column stats, grouping sets - cardinality = 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- map-side parallelism will be 10 (data size 796 / max split size 80, rounded up)
+
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select year from loc_orc group by year
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-side parallelism will be 10 (data size 796 / max split size 80, rounded up)
+
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select year from loc_orc group by year
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: year (type: int)
+              outputColumnNames: year
+              Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: year (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: year
+          Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state,zip from loc_orc group by state,zip
+PREHOOK: type: QUERY
+POSTHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state,zip from loc_orc group by state,zip
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
+            Select Operator
+              expressions: state (type: string), zip (type: bigint)
+              outputColumnNames: state, zip
+              Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
+              Group By Operator
+                keys: state (type: string), zip (type: bigint)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: bigint)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
+                  Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
+          mode: mergepartial
+          outputColumnNames: state, zip
+          Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
+explain select state,locid from loc_orc group by state,locid with rollup
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
+explain select state,locid from loc_orc group by state,locid with rollup
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
+explain select state,locid from loc_orc group by state,locid grouping sets((state))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
+explain select state,locid from loc_orc group by state,locid grouping sets((state))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 7: NO column stats - cardinality = 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 7: NO column stats - cardinality = 8
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
+explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
+explain select year from loc_orc group by year
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
+explain select year from loc_orc group by year
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: year (type: int)
+              outputColumnNames: year
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: year (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: year
+          Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
+explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: state (type: string), locid (type: int)
+              outputColumnNames: state, locid
+              Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: state (type: string), locid (type: int), '0' (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+                  Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
+          mode: mergepartial
+          outputColumnNames: state, locid
+          Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: state, locid
+            Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
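The row-count estimates in the plans above follow a simple rule when column stats are
absent: the map-side Group By emits one row per input row per grouping set, and the
reduce-side mergepartial estimate is half the map-side count. A minimal sketch of that
arithmetic (illustrative only; the variable names are not Hive's):

    // Reproduces the Statistics row counts printed above for loc_orc (8 rows).
    public class GroupingSetsEstimateSketch {
      public static void main(String[] args) {
        long inputRows = 8;                  // basic stats for loc_orc
        long numGroupingSets = 4;            // (state,locid),(state),(locid),()
        long mapSideRows = inputRows * numGroupingSets;  // 32, hash-side Group By
        long reduceSideRows = mapSideRows / 2;           // 16, mergepartial side
        System.out.println(mapSideRows + " -> " + reduceSideRows);
      }
    }

The same arithmetic gives 16 and 8 for two grouping sets, and 24 and 12 for three,
matching the plans.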


[11/55] [abbrv] hive git commit: HIVE-12295 : change some logs from info to debug (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12295 : change some logs from info to debug (Sergey Shelukhin, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a46005cf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a46005cf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a46005cf

Branch: refs/heads/master-fixed
Commit: a46005cfb260fa1328a9c237796c1fa683a5c35a
Parents: 6fda3b5
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Nov 2 13:07:04 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Nov 2 13:07:04 2015 -0800

----------------------------------------------------------------------
 .../llap/daemon/impl/ContainerRunnerImpl.java   |  2 +-
 .../llap/daemon/impl/TaskExecutorService.java   | 21 +++++-----
 .../llap/daemon/impl/TaskRunnerCallable.java    |  4 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |  4 +-
 .../llap/shufflehandler/ShuffleHandler.java     | 13 +++---
 .../dag/app/rm/LlapTaskSchedulerService.java    |  6 +--
 .../hive/metastore/AggregateStatsCache.java     |  2 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |  8 ++--
 .../hadoop/hive/ql/exec/MapredContext.java      |  2 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    | 42 ++++++++++----------
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  6 +--
 .../hadoop/hive/ql/exec/mr/ObjectCache.java     | 10 ++---
 .../hive/ql/exec/tez/LlapObjectCache.java       | 18 +++++----
 .../hadoop/hive/ql/exec/tez/TezProcessor.java   | 10 +++--
 .../ql/exec/vector/VectorGroupByOperator.java   |  2 +-
 .../ql/exec/vector/VectorizationContext.java    | 11 +++--
 .../ql/io/HiveContextAwareRecordReader.java     |  2 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      |  4 +-
 .../physical/NullScanTaskDispatcher.java        |  4 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  | 11 +++--
 .../hadoop/hive/ql/ppd/OpProcFactory.java       | 18 ++++++---
 21 files changed, 114 insertions(+), 86 deletions(-)
----------------------------------------------------------------------
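Most hunks in this commit share one pattern: demote chatty INFO logging to DEBUG and,
on hot paths, guard the call behind a cached level check so the message is never built
when DEBUG is off. A minimal sketch of the pattern, assuming SLF4J as the diffs do
(class and method names here are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);
      // Cached once, like the patch's isDebugEnabled/isLogDebugEnabled fields.
      private static final boolean isDebugEnabled = LOG.isDebugEnabled();

      void onTaskComplete(String requestId, int queueSize) {
        // Parameterized form: formatting is deferred until the level is enabled.
        LOG.debug("{} complete. queueSize={}", requestId, queueSize);
        // Guarded concatenation: skips building the string entirely when disabled.
        if (isDebugEnabled) {
          LOG.debug("Task " + requestId + " complete. queueSize=" + queueSize);
        }
      }
    }

One trade-off to note: the cached boolean is read once at class load, so a logger level
changed at runtime is not picked up by the guarded calls.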


http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index fad2d2c..4b28b53 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -191,7 +191,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
 
       Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
 
-      LOG.info("DEBUG: Registering request with the ShuffleHandler");
+      LOG.debug("Registering request with the ShuffleHandler");
       ShuffleHandler.get()
           .registerDag(request.getApplicationIdString(), dagIdentifier, jobToken,
               request.getUser(), localDirs);

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 875aef6..5e2c6dd 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -331,8 +331,8 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
       if (evictedTask != taskWrapper) {
         knownTasks.put(taskWrapper.getRequestId(), taskWrapper);
         taskWrapper.setIsInWaitQueue(true);
-        if (isInfoEnabled) {
-          LOG.info("{} added to wait queue. Current wait queue size={}", task.getRequestId(),
+        if (isDebugEnabled) {
+          LOG.debug("{} added to wait queue. Current wait queue size={}", task.getRequestId(),
               waitQueue.size());
         }
       } else {
@@ -413,8 +413,9 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
         // is actually available for execution and will not potentially result in a RejectedExecution
         Futures.addCallback(future, wrappedCallback, executionCompletionExecutorService);
 
-        if (isInfoEnabled) {
-          LOG.info("{} scheduled for execution. canFinish={}", taskWrapper.getRequestId(), canFinish);
+        if (isDebugEnabled) {
+          LOG.debug("{} scheduled for execution. canFinish={}",
+              taskWrapper.getRequestId(), canFinish);
         }
 
         // only tasks that cannot finish immediately are pre-emptable. In other words, if all inputs
@@ -465,7 +466,7 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
     synchronized (lock) {
       if (taskWrapper.isInWaitQueue()) {
         // Re-order the wait queue
-        LOG.info("DEBUG: Re-ordering the wait queue since {} finishable state moved to {}",
+        LOG.debug("Re-ordering the wait queue since {} finishable state moved to {}",
             taskWrapper.getRequestId(), newFinishableState);
         if (waitQueue.remove(taskWrapper)) {
           // Put element back only if it existed.
@@ -477,12 +478,12 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
       }
 
       if (newFinishableState == true && taskWrapper.isInPreemptionQueue()) {
-        LOG.info("DEBUG: Removing {} from preemption queue because it's state changed to {}",
+        LOG.debug("Removing {} from preemption queue because it's state changed to {}",
             taskWrapper.getRequestId(), newFinishableState);
         preemptionQueue.remove(taskWrapper.getTaskRunnerCallable());
       } else if (newFinishableState == false && !taskWrapper.isInPreemptionQueue() &&
           !taskWrapper.isInWaitQueue()) {
-        LOG.info("DEBUG: Adding {} to preemption queue since finishable state changed to {}",
+        LOG.debug("Adding {} to preemption queue since finishable state changed to {}",
             taskWrapper.getRequestId(), newFinishableState);
         preemptionQueue.offer(taskWrapper);
       }
@@ -554,9 +555,11 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
       }
 
       numSlotsAvailable.incrementAndGet();
-      LOG.info("Task {} complete. WaitQueueSize={}, numSlotsAvailable={}, preemptionQueueSize={}",
+      if (isDebugEnabled) {
+        LOG.debug("Task {} complete. WaitQueueSize={}, numSlotsAvailable={}, preemptionQueueSize={}",
           taskWrapper.getRequestId(), waitQueue.size(), numSlotsAvailable.get(),
           preemptionQueue.size());
+      }
       synchronized (lock) {
         if (!waitQueue.isEmpty()) {
           lock.notify();
@@ -707,7 +710,7 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
     public void finishableStateUpdated(boolean finishableState) {
      // This method should not be synchronized. Can lead to deadlocks since it calls a sync method.
       // Meanwhile the scheduler could try updating states via a synchronized method.
-      LOG.info("DEBUG: Received finishable state update for {}, state={}",
+      LOG.info("Received finishable state update for {}, state={}",
           taskRunnerCallable.getRequestId(), finishableState);
       taskExecutorService.finishableStateUpdated(this, finishableState);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 3b38597..30a38c8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -272,7 +272,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
             shouldRunTask = false;
           } else {
             // If the task hasn't started, and it is killed - report back to the AM that the task has been killed.
-            LOG.info("DBG: Reporting taskKilled for non-started fragment {}", getRequestId());
+            LOG.debug("Reporting taskKilled for non-started fragment {}", getRequestId());
             reportTaskKilled();
           }
           if (!isStarted.get()) {
@@ -398,7 +398,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
       switch(result.getEndReason()) {
         // Only the KILLED case requires a message to be sent out to the AM.
         case SUCCESS:
-          LOG.info("Successfully finished {}", requestId);
+          LOG.debug("Successfully finished {}", requestId);
           metrics.incrExecutorTotalSuccess();
           break;
         case CONTAINER_STOP_REQUESTED:

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 9bdafc9..1d0fdf0 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -168,8 +168,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
 
   @Override
   public void stop() {
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Encoded reader is being stopped");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Encoded reader is being stopped");
     }
     isStopped = true;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
index 762f069..b042455 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -417,7 +417,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
     // TODO Fix this. There's a race here, where an app may think everything is registered, finish really fast, send events and the consumer will not find the registration.
     Boolean registered = registeredApps.putIfAbsent(applicationIdString, Boolean.valueOf(true));
     if (registered == null) {
-      LOG.info("DEBUG: Registering watches for AppDirs: appId=" + applicationIdString);
+      LOG.debug("Registering watches for AppDirs: appId=" + applicationIdString);
       recordJobShuffleInfo(applicationIdString, user, appToken);
       if (dirWatcher != null) {
         for (String appDir : appDirs) {
@@ -538,7 +538,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
               @Override
               public void onRemoval(
                   RemovalNotification<AttemptPathIdentifier, AttemptPathInfo> notification) {
-                LOG.info("DEBUG: PathCacheEviction: " + notification.getKey() + ", Reason=" +
+                LOG.debug("PathCacheEviction: " + notification.getKey() + ", Reason=" +
                     notification.getCause());
               }
             })
@@ -561,7 +561,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
             Path mapOutputFileName =
                 lDirAlloc.getLocalPathToRead(attemptBase + "/" + DATA_FILE_NAME, conf);
 
-            LOG.info("DEBUG: Loaded : " + key + " via loader");
+            LOG.debug("Loaded : " + key + " via loader");
             if (dirWatcher != null) {
               dirWatcher.attemptInfoFound(key);
             }
@@ -582,7 +582,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
 
     void registerAttemptDirs(AttemptPathIdentifier identifier,
                                     AttemptPathInfo pathInfo) {
-      LOG.info("DEBUG: Registering " + identifier + " via watcher");
+      LOG.debug("Registering " + identifier + " via watcher");
       pathCache.put(identifier, pathInfo);
     }
 
@@ -748,7 +748,10 @@ public class ShuffleHandler implements AttemptRegistrationListener {
       try {
         AttemptPathIdentifier identifier = new AttemptPathIdentifier(jobId, dagId, user, mapId);
         pathInfo = pathCache.get(identifier);
-        LOG.info("DEBUG: Retrieved pathInfo for " + identifier + " check for corresponding loaded messages to determine whether it was loaded or cached");
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Retrieved pathInfo for " + identifier + " check for corresponding "
+              + "loaded messages to determine whether it was loaded or cached");
+        }
       } catch (ExecutionException e) {
         if (e.getCause() instanceof IOException) {
           throw (IOException) e.getCause();

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java b/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
index 6fd01f9..e920f86 100644
--- a/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
+++ b/llap-server/src/java/org/apache/tez/dag/app/rm/LlapTaskSchedulerService.java
@@ -359,12 +359,12 @@ public class LlapTaskSchedulerService extends TaskScheduler {
 
   @Override
   public void blacklistNode(NodeId nodeId) {
-    LOG.info("DEBUG: BlacklistNode not supported");
+    LOG.info("BlacklistNode not supported");
   }
 
   @Override
   public void unblacklistNode(NodeId nodeId) {
-    LOG.info("DEBUG: unBlacklistNode not supported");
+    LOG.info("unBlacklistNode not supported");
   }
 
   @Override
@@ -494,7 +494,7 @@ public class LlapTaskSchedulerService extends TaskScheduler {
 
   @Override
   public Object deallocateContainer(ContainerId containerId) {
-    LOG.info("DEBUG: Ignoring deallocateContainer for containerId: " + containerId);
+    LOG.debug("Ignoring deallocateContainer for containerId: " + containerId);
     // Containers are not being tracked for re-use.
     // This is safe to ignore since a deallocate task will come in.
     return null;

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
index 58c9f9e..4ab178c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
@@ -171,7 +171,7 @@ public class AggregateStatsCache {
     AggrColStatsList candidateList = cacheStore.get(key);
     // No key, or no nodes in candidate list
     if ((candidateList == null) || (candidateList.nodes.size() == 0)) {
-      LOG.info("No aggregate stats cached for " + key.toString());
+      LOG.debug("No aggregate stats cached for " + key.toString());
       return null;
     }
     // Find the value object

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 95fd1bf..4af98e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -160,8 +160,8 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
        * requires changes in the Tez API with regard to finding bucket id and
        * also ability to schedule tasks to re-use containers that have cached the specific bucket.
        */
-      if (isLogInfoEnabled) {
-        LOG.info("This is not bucket map join, so cache");
+      if (isLogDebugEnabled) {
+        LOG.debug("This is not bucket map join, so cache");
       }
 
       Future<Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]>> future =
@@ -200,12 +200,12 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
 
         loadHashTable(getExecContext(), MapredContext.get());
       } else {
-        if (LOG.isInfoEnabled()) {
+        if (LOG.isDebugEnabled()) {
           String s = "Using tables from cache: [";
           for (MapJoinTableContainer c : pair.getLeft()) {
             s += ((c == null) ? "null" : c.getClass().getSimpleName()) + ", ";
           }
-          LOG.info(s + "]");
+          LOG.debug(s + "]");
         }
         // let's use the table from the cache.
         mapJoinTables = pair.getLeft();

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
index caf4aa3..6ce84ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
@@ -52,7 +52,7 @@ public class MapredContext {
         HiveConf.getVar(jobConf, ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ?
             new TezContext(isMap, jobConf) : new MapredContext(isMap, jobConf);
     contexts.set(context);
-    logger.info("MapredContext initialized.");
+    logger.debug("MapredContext initialized.");
     return context;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 7282228..e2630ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -373,8 +373,8 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
           || childOperatorsArray.length != childOperators.size()) {
         throw new AssertionError("Internal error during operator initialization");
       }
-      if (isLogInfoEnabled) {
-        LOG.info("Initialization Done " + id + " " + getName());
+      if (isLogDebugEnabled) {
+        LOG.debug("Initialization Done " + id + " " + getName());
       }
 
       initializeChildren(hconf);
@@ -386,8 +386,8 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       }
     }
 
-    if (isLogInfoEnabled) {
-      LOG.info("Initialization Done " + id + " " + getName() + " done is reset.");
+    if (isLogDebugEnabled) {
+      LOG.debug("Initialization Done " + id + " " + getName() + " done is reset.");
     }
 
     // let's wait on the async ops before continuing
@@ -459,14 +459,14 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
    */
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    if (isLogInfoEnabled) {
-      LOG.info("Operator " + id + " " + getName() + " initialized");
+    if (isLogDebugEnabled) {
+      LOG.debug("Operator " + id + " " + getName() + " initialized");
     }
     if (childOperators == null || childOperators.isEmpty()) {
       return;
     }
-    if (isLogInfoEnabled) {
-      LOG.info("Initializing children of " + id + " " + getName());
+    if (isLogDebugEnabled) {
+      LOG.debug("Initializing children of " + id + " " + getName());
     }
     for (int i = 0; i < childOperatorsArray.length; i++) {
       childOperatorsArray[i].initialize(hconf, outputObjInspector, childOperatorsTag[i]);
@@ -503,8 +503,8 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
    */
   protected void initialize(Configuration hconf, ObjectInspector inputOI,
       int parentId) throws HiveException {
-    if (isLogInfoEnabled) {
-      LOG.info("Initializing child " + id + " " + getName());
+    if (isLogDebugEnabled) {
+      LOG.debug("Initializing child " + id + " " + getName());
     }
     // Double the size of the array if needed
     if (parentId >= inputObjInspectors.length) {
@@ -646,8 +646,8 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     // set state as CLOSE as long as all parents are closed
     // state == CLOSE doesn't mean all children are also in state CLOSE
     state = State.CLOSE;
-    if (isLogInfoEnabled) {
-      LOG.info(id + " finished. closing... ");
+    if (isLogDebugEnabled) {
+      LOG.debug(id + " finished. closing... ");
     }
 
     abort |= abortOp.get();
@@ -664,14 +664,14 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       }
 
       for (Operator<? extends OperatorDesc> op : childOperators) {
-	if (isLogDebugEnabled) {
-	  LOG.debug("Closing child = " + op);
-	}
+        if (isLogDebugEnabled) {
+          LOG.debug("Closing child = " + op);
+        }
         op.close(abort);
       }
 
-      if (isLogInfoEnabled) {
-	LOG.info(id + " Close done");
+      if (isLogDebugEnabled) {
+        LOG.debug(id + " Close done");
       }
     } catch (HiveException e) {
       e.printStackTrace();
@@ -893,10 +893,12 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   }
 
   public void logStats() {
-    if (isLogInfoEnabled) {
-      for (String e : statsMap.keySet()) {
-        LOG.info(e.toString() + ":" + statsMap.get(e).toString());
+    if (isLogInfoEnabled && !statsMap.isEmpty()) {
+      StringBuilder sb = new StringBuilder();
+      for (Map.Entry<String, LongWritable> e : statsMap.entrySet()) {
+        sb.append(e.getKey()).append(":").append(e.getValue().toString()).append(", ");
       }
+      LOG.info(sb.toString());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 0618077..665b3f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -399,11 +399,11 @@ public final class Utilities {
         } else if (ShimLoader.getHadoopShims().isLocalMode(conf)) {
           localPath = path;
         } else {
-          LOG.info("***************non-local mode***************");
+          LOG.debug("***************non-local mode***************");
           localPath = new Path(name);
         }
         localPath = path;
-        LOG.info("local path = " + localPath);
+        LOG.debug("local path = " + localPath);
         if (HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) {
           LOG.debug("Loading plan from string: "+path.toUri().getPath());
           String planString = conf.getRaw(path.toUri().getPath());
@@ -415,7 +415,7 @@ public final class Utilities {
           in = new ByteArrayInputStream(planBytes);
           in = new InflaterInputStream(in);
         } else {
-          LOG.info("Open file to read in plan: " + localPath);
+          LOG.debug("Open file to read in plan: " + localPath);
           in = localPath.getFileSystem(conf).open(localPath);
         }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
index 7baf9b2..008f8a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
@@ -36,21 +36,21 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache {
 
   private static final Logger LOG = LoggerFactory.getLogger(ObjectCache.class.getName());
-  private static final boolean isInfoEnabled = LOG.isInfoEnabled();
+  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
 
   @Override
   public void release(String key) {
     // nothing to do
-    if (isInfoEnabled) {
-      LOG.info(key + " no longer needed");
+    if (isDebugEnabled) {
+      LOG.debug(key + " no longer needed");
     }
   }
 
   @Override
   public <T> T retrieve(String key, Callable<T> fn) throws HiveException {
     try {
-      if (isInfoEnabled) {
-        LOG.info("Creating " + key);
+      if (isDebugEnabled) {
+        LOG.debug("Creating " + key);
       }
       return fn.call();
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
index 6f77453..0141230 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
@@ -44,7 +44,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
 
   private static ExecutorService staticPool = Executors.newCachedThreadPool();
 
-  private static final boolean isLogInfoEnabled = LOG.isInfoEnabled();
+  private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();
 
   private final Cache<String, Object> registry = CacheBuilder.newBuilder().softValues().build();
 
@@ -69,8 +69,8 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
     try {
       value = (T) registry.getIfPresent(key);
       if (value != null) {
-        if (isLogInfoEnabled) {
-          LOG.info("Found " + key + " in cache");
+        if (isLogDebugEnabled) {
+          LOG.debug("Found " + key + " in cache");
         }
         return value;
       }
@@ -91,8 +91,8 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
       try {
         value = (T) registry.getIfPresent(key);
         if (value != null) {
-          if (isLogInfoEnabled) {
-            LOG.info("Found " + key + " in cache");
+          if (isLogDebugEnabled) {
+            LOG.debug("Found " + key + " in cache");
           }
           return value;
         }
@@ -108,8 +108,8 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
 
       lock.lock();
       try {
-        if (isLogInfoEnabled) {
-          LOG.info("Caching new object for key: " + key);
+        if (isLogDebugEnabled) {
+          LOG.debug("Caching new object for key: " + key);
         }
 
         registry.put(key, value);
@@ -135,7 +135,9 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
 
   @Override
   public void remove(String key) {
-    LOG.info("Removing key: " + key);
+    if (isLogDebugEnabled) {
+      LOG.debug("Removing key: " + key);
+    }
     registry.invalidate(key);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
index 23f2487..c560f37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
@@ -140,11 +140,13 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
       return;
     }
 
-      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR);
-      // in case of broadcast-join read the broadcast edge inputs
-      // (possibly asynchronously)
+    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR);
+    // in case of broadcast-join read the broadcast edge inputs
+    // (possibly asynchronously)
 
-      LOG.info("Running task: " + getContext().getUniqueIdentifier());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Running task: " + getContext().getUniqueIdentifier());
+    }
 
     synchronized (this) {
       if (isMap) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
index 0bea5ff..35bbaef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
@@ -318,7 +318,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
 
       mapKeysAggregationBuffers = new HashMap<KeyWrapper, VectorAggregationBufferRow>();
       computeMemoryLimits();
-      LOG.info("using hash aggregation processing mode");
+      LOG.debug("using hash aggregation processing mode");
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index f00804e..3489c9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -157,7 +157,10 @@ public class VectorizationContext {
   public VectorizationContext(String contextName, List<String> initialColumnNames) {
     this.contextName = contextName;
     level = 0;
-    LOG.info("VectorizationContext consructor contextName " + contextName + " level " + level + " initialColumnNames " + initialColumnNames.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("VectorizationContext consructor contextName " + contextName + " level "
+          + level + " initialColumnNames " + initialColumnNames);
+    }
     this.projectionColumnNames = initialColumnNames;
 
     projectedColumns = new ArrayList<Integer>();
@@ -177,8 +180,10 @@ public class VectorizationContext {
   public VectorizationContext(String contextName) {
     this.contextName = contextName;
     level = 0;
-    LOG.info("VectorizationContext consructor contextName " + contextName + " level " + level);
-      projectedColumns = new ArrayList<Integer>();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("VectorizationContext consructor contextName " + contextName + " level " + level);
+    }
+    projectedColumns = new ArrayList<Integer>();
     projectionColumnNames = new ArrayList<String>();
     projectionColumnMap = new HashMap<String, Integer>();
     this.ocm = new OutputColumnManager(0);

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 7d36e42..4a05a62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -171,7 +171,7 @@ public abstract class HiveContextAwareRecordReader<K, V> implements RecordReader
     ioCxtRef.setCurrentBlockStart(startPos);
     ioCxtRef.setBlockPointer(isBlockPointer);
     ioCxtRef.setInputPath(inputPath);
-    LOG.info("Processing file " + inputPath);
+    LOG.debug("Processing file " + inputPath); // Logged at INFO in multiple other places.
     initDone = true;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 29c4b61..a883124 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -210,7 +210,9 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       LOG.info("Not using llap for " + inputFormat + ": " + isSupported + ", " + isVector);
       return inputFormat;
     }
-    LOG.info("Wrapping " + inputFormat);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Wrapping " + inputFormat);
+    }
     @SuppressWarnings("unchecked")
     LlapIo<VectorizedRowBatch> llapIo = LlapIoProxy.getIo();
     if (llapIo == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java
index 2e924fb..0c4519c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java
@@ -172,7 +172,7 @@ public class NullScanTaskDispatcher implements Dispatcher {
         return null;
       }
 
-      LOG.info("Looking for table scans where optimization is applicable");
+      LOG.debug("Looking for table scans where optimization is applicable");
 
       // The dispatcher fires the processor corresponding to the closest
       // matching rule and passes the context along
@@ -196,7 +196,7 @@ public class NullScanTaskDispatcher implements Dispatcher {
 
       ogw.startWalking(topNodes, null);
 
-      LOG.info(String.format("Found %d null table scans",
+      LOG.debug(String.format("Found %d null table scans",
           walkerCtx.getMetadataOnlyTableScans().size()));
       if (walkerCtx.getMetadataOnlyTableScans().size() > 0)
         processAlias(mapWork, walkerCtx.getMetadataOnlyTableScans());

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 97e7013..a8ebf8f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -744,7 +744,7 @@ public class Vectorizer implements PhysicalPlanResolver {
         }
         vContext = taskVectorizationContext;
       } else {
-        LOG.info("MapWorkVectorizationNodeProcessor process going to walk the operator stack to get vectorization context for " + op.getName());
+        LOG.debug("MapWorkVectorizationNodeProcessor process going to walk the operator stack to get vectorization context for " + op.getName());
         vContext = walkStackToFindVectorizationContext(stack, op);
         if (vContext == null) {
           // No operator has "pushed" a new context -- so use the task vectorization context.
@@ -753,7 +753,10 @@ public class Vectorizer implements PhysicalPlanResolver {
       }
 
       assert vContext != null;
-      LOG.info("MapWorkVectorizationNodeProcessor process operator " + op.getName() + " using vectorization context" + vContext.toString());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("MapWorkVectorizationNodeProcessor process operator " + op.getName()
+            + " using vectorization context" + vContext.toString());
+      }
 
      // When Vectorized GROUPBY outputs rows instead of vectorized row batches, we don't
       // vectorize the operators below it.
@@ -1986,8 +1989,8 @@ public class Vectorizer implements PhysicalPlanResolver {
         break;
     }
 
-    LOG.info("vectorizeOperator " + (vectorOp == null ? "NULL" : vectorOp.getClass().getName()));
-    LOG.info("vectorizeOperator " + (vectorOp == null || vectorOp.getConf() == null ? "NULL" : vectorOp.getConf().getClass().getName()));
+    LOG.debug("vectorizeOperator " + (vectorOp == null ? "NULL" : vectorOp.getClass().getName()));
+    LOG.debug("vectorizeOperator " + (vectorOp == null || vectorOp.getConf() == null ? "NULL" : vectorOp.getConf().getClass().getName()));
 
     if (vectorOp != op) {
       fixupParentChildOperators(op, vectorOp);

http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
index 5b85c93..3605484 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
@@ -127,7 +127,7 @@ public final class OpProcFactory {
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
-      LOG.info("Processing for " + nd.getName() + "("
+      LOG.debug("Processing for " + nd.getName() + "("
           + ((Operator) nd).getIdentifier() + ")");
       // script operator is a black-box to hive so no optimization here
       // assuming that nothing can be pushed above the script op
@@ -709,13 +709,19 @@ public final class OpProcFactory {
      * @param ewi
      */
     protected void logExpr(Node nd, ExprWalkerInfo ewi) {
-      for (Entry<String, List<ExprNodeDesc>> e : ewi.getFinalCandidates()
-          .entrySet()) {
-        LOG.info("Pushdown Predicates of " + nd.getName() + " For Alias : "
-            + e.getKey());
+      if (!LOG.isInfoEnabled()) return;
+      for (Entry<String, List<ExprNodeDesc>> e : ewi.getFinalCandidates().entrySet()) {
+        StringBuilder sb = new StringBuilder("Pushdown predicates of ").append(nd.getName())
+            .append(" for alias ").append(e.getKey()).append(": ");
+        boolean isFirst = true;
         for (ExprNodeDesc n : e.getValue()) {
-          LOG.info("\t" + n.getExprString());
+          if (!isFirst) {
+            sb.append("; ");
+          }
+          isFirst = false;
+          sb.append(n.getExprString());
         }
+        LOG.info(sb.toString());
       }
     }
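The rewritten logExpr above folds what used to be one log call per predicate into a
single line per alias, with entries joined by "; ". A standalone sketch of the same
joining idiom (the class name and sample predicates are made up):

    import java.util.Arrays;
    import java.util.List;

    public class JoinedLogLineSketch {
      public static void main(String[] args) {
        List<String> preds = Arrays.asList("(state = 'CA')", "(locid > 10)");
        StringBuilder sb = new StringBuilder("Pushdown predicates of TS for alias t: ");
        boolean isFirst = true;
        for (String p : preds) {
          if (!isFirst) {
            sb.append("; ");
          }
          isFirst = false;
          sb.append(p);
        }
        // Prints: Pushdown predicates of TS for alias t: (state = 'CA'); (locid > 10)
        System.out.println(sb.toString());
      }
    }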
 


[15/55] [abbrv] hive git commit: HIVE-11966 JDBC Driver parsing error when reading principal from ZooKeeper (Vaibhav Gumashta via Alan Gates)

Posted by jx...@apache.org.
HIVE-11966 JDBC Driver parsing error when reading principal from ZooKeeper (Vaibhav Gumashta via Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/de1fe68b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/de1fe68b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/de1fe68b

Branch: refs/heads/master-fixed
Commit: de1fe68b90ff9e29a21095035c7ed02dbbf35f26
Parents: 1357f63
Author: Alan Gates <ga...@hortonworks.com>
Authored: Mon Nov 2 16:01:03 2015 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Nov 2 16:01:03 2015 -0800

----------------------------------------------------------------------
 jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/de1fe68b/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java b/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
index 7195515..1ca77a1 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
@@ -159,7 +159,7 @@ class ZooKeeperHiveClientHelper {
         }
         // KERBEROS
         // If delegation token is passed from the client side, do not set the principal
-        if (matcher.group(2).equalsIgnoreCase("hive.server2.authentication.kerberos.principal")
+        if (matcher.group(1).equalsIgnoreCase("hive.server2.authentication.kerberos.principal")
             && !(connParams.getSessionVars().containsKey(JdbcConnectionParams.AUTH_TYPE) && connParams
                 .getSessionVars().get(JdbcConnectionParams.AUTH_TYPE)
                 .equalsIgnoreCase(JdbcConnectionParams.AUTH_TOKEN))
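The one-line fix above moves the key comparison from group(2) to group(1). A
hypothetical sketch of why the index matters (this is not the helper's actual regex,
just a generic key=value pattern for illustration):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class GroupIndexSketch {
      public static void main(String[] args) {
        // With a single capture pair, group(1) is the key and group(2) the value.
        Pattern kv = Pattern.compile("(.*?)=(.*)");
        Matcher m = kv.matcher(
            "hive.server2.authentication.kerberos.principal=hive/_HOST@EXAMPLE.COM");
        if (m.matches()) {
          System.out.println("key   = " + m.group(1)); // what the fixed code compares
          System.out.println("value = " + m.group(2)); // the principal itself
        }
      }
    }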


[24/55] [abbrv] hive git commit: HIVE-12273: Improve user level explain (Pengcheng Xiong, reviewed by Ashutosh Chauhan, Laljo John Pullokkaran, Eugene Koifman, Prasanth Jayachandran and Wei Zheng)

Posted by jx...@apache.org.
HIVE-12273: Improve user level explain (Pengcheng Xiong, reviewed by Ashutosh Chauhan, Laljo John Pullokkaran, Eugene Koifman, Prasanth Jayachandran and Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6577f55c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6577f55c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6577f55c

Branch: refs/heads/master-fixed
Commit: 6577f55cd7f21568994638399f9c31bef578b5cc
Parents: d5fdeed
Author: Pengcheng Xiong <px...@apache.org>
Authored: Tue Nov 3 10:11:18 2015 -0800
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Tue Nov 3 10:11:18 2015 -0800

----------------------------------------------------------------------
 .../hive/common/jsonexplain/tez/Vertex.java     |   9 +-
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |   4 +-
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |   2 +-
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |   2 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |   7 +
 .../test/queries/clientpositive/explainuser_3.q |  46 +++-
 .../clientpositive/llap/constprog_dpp.q.out     |  10 +-
 .../clientpositive/tez/explainuser_1.q.out      |  16 ++
 .../clientpositive/tez/explainuser_2.q.out      |  38 +++
 .../clientpositive/tez/explainuser_3.q.out      | 230 ++++++++++++++++++-
 .../tez/vector_aggregate_without_gby.q.out      |   4 +-
 .../tez/vector_auto_smb_mapjoin_14.q.out        |  32 +--
 .../tez/vectorized_parquet_types.q.out          |   2 +-
 13 files changed, 363 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
index 67ff8eb..be01b8b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
@@ -50,6 +50,8 @@ public final class Vertex implements Comparable<Vertex>{
   public final List<Vertex> mergeJoinDummyVertexs = new ArrayList<>();
   // whether this vertex has multiple reduce operators
   public boolean hasMultiReduceOp = false;
+  // execution mode
+  public String executionMode = "";
 
   public Vertex(String name, JSONObject vertexObject, TezJsonParser tezJsonParser) {
     super();
@@ -103,6 +105,8 @@ public final class Vertex implements Comparable<Vertex>{
           } else {
             throw new Exception("Merge File Operator does not have a Map Operator Tree");
           }
+        } else if (key.equals("Execution mode:")) {
+          executionMode = " " + vertexObject.getString(key);
         } else {
           throw new Exception("Unsupported operator tree in vertex " + this.name);
         }
@@ -189,9 +193,10 @@ public final class Vertex implements Comparable<Vertex>{
     }
     parser.printSet.add(this);
     if (type != null) {
-      printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + " [" + type + "]");
+      printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + " [" + type + "]"
+          + this.executionMode);
     } else if (this.name != null) {
-      printer.println(TezJsonParser.prefixString(indentFlag) + this.name);
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.name + this.executionMode);
     }
     // print operators
     if (hasMultiReduceOp && !callingVertex.union) {
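
The hunks above follow a simple parse-then-print pattern: the vertex remembers the "Execution mode:" value while walking its JSON keys, then appends it to the printed header. Below is a minimal, self-contained sketch of that pattern using org.json as the parser; VertexSketch and its members are illustrative stand-ins, not the actual Hive classes.

import org.json.JSONObject;

public class VertexSketch {
  // e.g. " vectorized" or " llap"; stays empty when the key is absent
  private String executionMode = "";

  public void parse(JSONObject vertexObject) {
    // Other keys (operator trees, etc.) are handled elsewhere in the
    // real parser; only the new branch is shown here.
    if (vertexObject.has("Execution mode:")) {
      executionMode = " " + vertexObject.getString("Execution mode:");
    }
  }

  public String header(String name, String type) {
    // Yields the "Map 1 [SIMPLE_EDGE] llap" / "Reducer 2 vectorized"
    // forms visible in the q.out diffs below.
    return type != null
        ? name + " [" + type + "]" + executionMode
        : name + executionMode;
  }
}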

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
index e27b89b..4b93e7c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
@@ -334,7 +334,7 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
     this.bigTablePartSpecToFileMapping = partToFileMapping;
   }
 
-  @Explain(displayName = "BucketMapJoin", explainLevels = { Level.EXTENDED }, displayOnlyOnTrue = true)
+  @Explain(displayName = "BucketMapJoin", explainLevels = { Level.USER, Level.EXTENDED }, displayOnlyOnTrue = true)
   public boolean isBucketMapJoin() {
     return isBucketMapJoin;
   }
@@ -343,7 +343,7 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
     this.isBucketMapJoin = isBucketMapJoin;
   }
 
-  @Explain(displayName = "HybridGraceHashJoin", displayOnlyOnTrue = true)
+  @Explain(displayName = "HybridGraceHashJoin", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, displayOnlyOnTrue = true)
   public boolean isHybridHashJoin() {
     return isHybridHashJoin;
   }
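
The annotation edits above and below all do the same thing: add Level.USER to explainLevels so the field also shows up in user-level explain. The following toy walker is a rough illustration of how such an annotation can gate output; it stands in for Hive's real explain traversal (which is considerably more involved), and the nested Explain/Level types here merely mimic the real ones.

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.reflect.Method;
import java.util.Arrays;

public class ExplainLevelsSketch {
  enum Level { USER, DEFAULT, EXTENDED }

  @Retention(RetentionPolicy.RUNTIME)
  @interface Explain {
    String displayName();
    Level[] explainLevels() default { Level.DEFAULT, Level.EXTENDED };
    boolean displayOnlyOnTrue() default false;
  }

  // Mirrors the MapJoinDesc change: visible at USER level after the patch.
  @Explain(displayName = "HybridGraceHashJoin",
      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
      displayOnlyOnTrue = true)
  public boolean isHybridHashJoin() {
    return true;
  }

  public static void main(String[] args) throws Exception {
    Level requested = Level.USER;
    ExplainLevelsSketch desc = new ExplainLevelsSketch();
    for (Method m : ExplainLevelsSketch.class.getDeclaredMethods()) {
      Explain ann = m.getAnnotation(Explain.class);
      if (ann == null) {
        continue;
      }
      boolean atThisLevel = Arrays.asList(ann.explainLevels()).contains(requested);
      Object value = m.invoke(desc);
      if (atThisLevel && !(ann.displayOnlyOnTrue() && Boolean.FALSE.equals(value))) {
        // Prints "HybridGraceHashJoin:true", matching the q.out lines below.
        System.out.println(ann.displayName() + ":" + value);
      }
    }
  }
}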

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index 87c15a2..d349934 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -340,7 +340,7 @@ public class MapWork extends BaseWork {
     }
   }
 
-  @Explain(displayName = "Execution mode")
+  @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getExecutionMode() {
     if (vectorMode) {
       if (llapMode) {
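
This hunk (and the identical ReduceWork one below) is cut off by the diff context, but the shape it suggests, together with the "llap" and "vectorized" suffixes in the q.out diffs further down, is roughly the following. This is a sketch only; the exact strings Hive's MapWork/ReduceWork return may differ.

public String getExecutionMode() {
  // Assumed combinations based on the q.out output: "vectorized",
  // "llap", or both; null suppresses the line entirely.
  if (vectorMode) {
    return llapMode ? "vectorized, llap" : "vectorized";
  }
  return llapMode ? "llap" : null;
}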

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
index 0222c23..8211346 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
@@ -146,7 +146,7 @@ public class ReduceWork extends BaseWork {
     this.tagToValueDesc = tagToValueDesc;
   }
 
-  @Explain(displayName = "Execution mode")
+  @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getExecutionMode() {
     if (vectorMode) {
       if (llapMode) {

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 6661ce6..be7139c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -26,6 +26,8 @@ import java.util.Map;
 import org.apache.hadoop.hive.ql.exec.PTFUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.TableSample;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
@@ -135,6 +137,11 @@ public class TableScanDesc extends AbstractOperatorDesc {
     return alias;
   }
 
+  @Explain(displayName = "ACID table", explainLevels = { Level.USER }, displayOnlyOnTrue = true)
+  public boolean isAcidTable() {
+    return SemanticAnalyzer.isAcidTable(this.tableMetadata);
+  }
+
   @Explain(displayName = "filterExpr")
   public String getFilterExprString() {
     StringBuilder sb = new StringBuilder();
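
The new USER-level "ACID table" flag is driven by SemanticAnalyzer.isAcidTable(tableMetadata); judging by the explainuser_3.q setup below, that check presumably reduces to the table's 'transactional' property. A tiny sketch of that reduced check follows; isAcidTable here is a stand-in, not Hive's actual method.

import java.util.Map;

public final class AcidFlagSketch {
  // Assumption: ACID-ness is carried by the 'transactional' table
  // property, as set via TBLPROPERTIES ('transactional'='true') in the
  // CREATE TABLE statement added to explainuser_3.q below.
  static boolean isAcidTable(Map<String, String> tableProperties) {
    return tableProperties != null
        && "true".equalsIgnoreCase(tableProperties.get("transactional"));
  }
}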

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/queries/clientpositive/explainuser_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q
index 16237bb..f604d38 100644
--- a/ql/src/test/queries/clientpositive/explainuser_3.q
+++ b/ql/src/test/queries/clientpositive/explainuser_3.q
@@ -1,5 +1,15 @@
 set hive.explain.user=true;
 
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+
+CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
+explain select a, b from acid_vectorized order by a, b;
+
 explain select key, value
 FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol;
 
@@ -112,4 +122,38 @@ set hive.merge.mapredfiles=true;
 
 explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
 
-drop table orc_merge5;
\ No newline at end of file
+drop table orc_merge5;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.convert.join.bucket.mapjoin.tez = true;
+explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key;
+
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out b/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
index 0bc964b..72a5d0d 100644
--- a/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
+++ b/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
@@ -50,7 +50,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 4
+         Reducer 4 llap
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -60,7 +60,7 @@ Stage-0
             |  keys:{"0":"id (type: int)","1":"_col0 (type: int)"}
             |  outputColumnNames:["_col0"]
             |  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            |<-Map 6 [SIMPLE_EDGE]
+            |<-Map 6 [SIMPLE_EDGE] llap
             |  Reduce Output Operator [RS_12]
             |     key expressions:id (type: int)
             |     Map-reduce partition columns:id (type: int)
@@ -69,7 +69,7 @@ Stage-0
             |     TableScan [TS_11]
             |        alias:a
             |        Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            |<-Reducer 3 [SIMPLE_EDGE]
+            |<-Reducer 3 [SIMPLE_EDGE] llap
                Reduce Output Operator [RS_13]
                   key expressions:_col0 (type: int)
                   Map-reduce partition columns:_col0 (type: int)
@@ -82,7 +82,7 @@ Stage-0
                      |  outputColumnNames:["_col0"]
                      |  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                      |<-Union 2 [SIMPLE_EDGE]
-                        |<-Map 1 [CONTAINS]
+                        |<-Map 1 [CONTAINS] llap
                         |  Reduce Output Operator [RS_8]
                         |     sort order:
                         |     Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
@@ -96,7 +96,7 @@ Stage-0
                         |           TableScan [TS_0]
                         |              alias:tb2
                         |              Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                        |<-Map 5 [CONTAINS]
+                        |<-Map 5 [CONTAINS] llap
                            Reduce Output Operator [RS_8]
                               sort order:
                               Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index ec434f0..ee70033 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -4683,6 +4683,7 @@ Stage-0
                                     Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
                                     Map Join Operator [MAPJOIN_25]
                                     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |  HybridGraceHashJoin:true
                                     |  keys:{"Map 1":"_col0 (type: string)","Map 4":"_col0 (type: string)"}
                                     |  outputColumnNames:["_col1"]
                                     |  Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6418,6 +6419,7 @@ Stage-0
             |     value expressions:_col0 (type: string)
             |     Map Join Operator [MAPJOIN_28]
             |     |  condition map:[{"":"Inner Join 0 to 1"}]
+            |     |  HybridGraceHashJoin:true
             |     |  keys:{"Map 1":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
             |     |  outputColumnNames:["_col0","_col1"]
             |     |  Statistics:Num rows: 241 Data size: 42898 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6494,6 +6496,7 @@ Stage-0
             |     value expressions:_col0 (type: string)
             |     Map Join Operator [MAPJOIN_28]
             |     |  condition map:[{"":"Inner Join 0 to 1"}]
+            |     |  HybridGraceHashJoin:true
             |     |  keys:{"Map 1":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
             |     |  outputColumnNames:["_col0","_col1"]
             |     |  Statistics:Num rows: 241 Data size: 42898 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6570,6 +6573,7 @@ Stage-0
             |     value expressions:_col0 (type: string)
             |     Map Join Operator [MAPJOIN_28]
             |     |  condition map:[{"":"Inner Join 0 to 1"}]
+            |     |  HybridGraceHashJoin:true
             |     |  keys:{"Map 1":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
             |     |  outputColumnNames:["_col0","_col1"]
             |     |  Statistics:Num rows: 241 Data size: 42898 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6742,6 +6746,7 @@ Stage-0
                                  value expressions:_col5 (type: int)
                                  Map Join Operator [MAPJOIN_21]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
+                                 |  HybridGraceHashJoin:true
                                  |  keys:{"Map 1":"p_partkey (type: int)","Map 4":"p_partkey (type: int)"}
                                  |  outputColumnNames:["_col1","_col2","_col5"]
                                  |  Statistics:Num rows: 29 Data size: 6467 Basic stats: COMPLETE Column stats: COMPLETE
@@ -7014,6 +7019,7 @@ Stage-0
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Map Join Operator [MAPJOIN_16]
             |  condition map:[{"":"Inner Join 0 to 1"}]
+            |  HybridGraceHashJoin:true
             |  keys:{"Reducer 2":"_col0 (type: int)","Map 3":"p_partkey (type: int)"}
             |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
             |  Statistics:Num rows: 29 Data size: 17951 Basic stats: COMPLETE Column stats: COMPLETE
@@ -7463,6 +7469,7 @@ Stage-0
                         value expressions:_col5 (type: int), _col7 (type: double)
                         Map Join Operator [MAPJOIN_20]
                         |  condition map:[{"":"Inner Join 0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Reducer 2":"_col0 (type: int)","Map 4":"p_partkey (type: int)"}
                         |  outputColumnNames:["_col1","_col2","_col5","_col7"]
                         |  Statistics:Num rows: 29 Data size: 6699 Basic stats: COMPLETE Column stats: COMPLETE
@@ -8247,6 +8254,7 @@ Stage-0
                Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                Map Join Operator [MAPJOIN_31]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 1 to 2"},{"":"Inner Join 2 to 3"}]
+               |  HybridGraceHashJoin:true
                |  keys:{"Map 1":"key (type: string)","Map 2":"key (type: string)","Map 3":"key (type: string)","Map 4":"key (type: string)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11","_col15","_col16"]
                |  Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
@@ -8327,6 +8335,7 @@ Stage-0
                Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                Map Join Operator [MAPJOIN_31]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 1 to 2"},{"":"Inner Join 2 to 3"}]
+               |  HybridGraceHashJoin:true
                |  keys:{"Map 1":"key (type: string)","Map 2":"key (type: string)","Map 3":"key (type: string)","Map 4":"key (type: string)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11","_col15","_col16"]
                |  Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
@@ -8417,6 +8426,7 @@ Stage-0
                         value expressions:hash(_col0) (type: int), hash(_col1) (type: int), hash(_col5) (type: int)
                         Map Join Operator [MAPJOIN_18]
                         |  condition map:[{"":"Inner Join 0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"UDFToDouble(key) (type: double)","Map 2":"(key + 1) (type: double)"}
                         |  outputColumnNames:["_col0","_col1","_col5"]
                         |  Statistics:Num rows: 275 Data size: 23925 Basic stats: COMPLETE Column stats: NONE
@@ -8558,6 +8568,7 @@ Stage-0
                         value expressions:hash(_col0) (type: int), hash(_col6) (type: int)
                         Map Join Operator [MAPJOIN_18]
                         |  condition map:[{"":"Inner Join 0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"key (type: string)","Map 4":"val (type: string)"}
                         |  outputColumnNames:["_col0","_col6"]
                         |  Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
@@ -8621,6 +8632,7 @@ Stage-0
                         value expressions:hash(_col0) (type: int), hash(_col6) (type: int)
                         Map Join Operator [MAPJOIN_18]
                         |  condition map:[{"":"Inner Join 0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"key (type: string)","Map 4":"key (type: string)"}
                         |  outputColumnNames:["_col0","_col6"]
                         |  Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
@@ -8684,6 +8696,7 @@ Stage-0
                         Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
                         Map Join Operator [MAPJOIN_18]
                         |  condition map:[{"":"Inner Join 0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"key (type: string)","Map 4":"key (type: string)"}
                         |  Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
                         |<-Map 4 [BROADCAST_EDGE]
@@ -8746,6 +8759,7 @@ Stage-0
                         value expressions:hash(_col0) (type: int), hash(_col1) (type: int), hash(_col5) (type: int)
                         Map Join Operator [MAPJOIN_14]
                         |  condition map:[{"":"Left Outer Join0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"UDFToDouble(key) (type: double)","Map 4":"(key + 1) (type: double)"}
                         |  outputColumnNames:["_col0","_col1","_col5"]
                         |  Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
@@ -8804,6 +8818,7 @@ Stage-0
                         value expressions:hash(_col0) (type: int), hash(_col1) (type: int), hash(_col5) (type: int)
                         Map Join Operator [MAPJOIN_14]
                         |  condition map:[{"":"Right Outer Join0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"UDFToDouble(key) (type: double)","Map 2":"(key + 1) (type: double)"}
                         |  outputColumnNames:["_col0","_col1","_col5"]
                         |  Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
@@ -8927,6 +8942,7 @@ Stage-0
                         value expressions:hash(_col0) (type: int), hash(_col6) (type: int)
                         Map Join Operator [MAPJOIN_14]
                         |  condition map:[{"":"Left Outer Join0 to 1"}]
+                        |  HybridGraceHashJoin:true
                         |  keys:{"Map 1":"(key + 1) (type: double)","Map 4":"UDFToDouble(key) (type: double)"}
                         |  outputColumnNames:["_col0","_col6"]
                         |  Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
index 7b361ac..ff055ea 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
@@ -1405,6 +1405,7 @@ Stage-0
                Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                Map Join Operator [MAPJOIN_28]
                |  condition map:[{"":"Inner Join 0 to 1"}]
+               |  HybridGraceHashJoin:true
                |  keys:{"Map 1":"_col3 (type: string)","Map 3":"_col0 (type: string)"}
                |  outputColumnNames:["_col0","_col3","_col6"]
                |  Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
@@ -1426,6 +1427,7 @@ Stage-0
                |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |<-Map Join Operator [MAPJOIN_27]
                   |  condition map:[{"":"Inner Join 0 to 1"}]
+                  |  HybridGraceHashJoin:true
                   |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col1 (type: string)"}
                   |  outputColumnNames:["_col0","_col3"]
                   |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -1566,6 +1568,7 @@ Stage-0
                                     Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                                     Map Join Operator [MAPJOIN_110]
                                     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |  HybridGraceHashJoin:true
                                     |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col15 (type: string), _col17 (type: string)"}
                                     |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
                                     |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
@@ -1578,6 +1581,7 @@ Stage-0
                                     |     value expressions:_col2 (type: string)
                                     |     Map Join Operator [MAPJOIN_104]
                                     |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |     |  HybridGraceHashJoin:true
                                     |     |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
                                     |     |  outputColumnNames:["_col1","_col2","_col3"]
                                     |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
@@ -1611,6 +1615,7 @@ Stage-0
                                           Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
                                           Map Join Operator [MAPJOIN_109]
                                           |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |  HybridGraceHashJoin:true
                                           |  keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"}
                                           |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
                                           |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
@@ -1623,6 +1628,7 @@ Stage-0
                                           |     value expressions:_col3 (type: string), _col5 (type: string)
                                           |     Map Join Operator [MAPJOIN_108]
                                           |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |     |  HybridGraceHashJoin:true
                                           |     |  keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"}
                                           |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
                                           |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
@@ -1653,6 +1659,7 @@ Stage-0
                                           |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                                           |<-Map Join Operator [MAPJOIN_107]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
+                                             |  HybridGraceHashJoin:true
                                              |  keys:{"Map 3":"_col3 (type: string)","Map 8":"_col1 (type: string)"}
                                              |  outputColumnNames:["_col2","_col3","_col4","_col6"]
                                              |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
@@ -1673,6 +1680,7 @@ Stage-0
                                              |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                                              |<-Map Join Operator [MAPJOIN_106]
                                                 |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                |  HybridGraceHashJoin:true
                                                 |  keys:{"Map 3":"_col2 (type: string)","Map 7":"_col0 (type: string)"}
                                                 |  outputColumnNames:["_col2","_col3","_col4","_col6"]
                                                 |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
@@ -1693,6 +1701,7 @@ Stage-0
                                                 |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                                                 |<-Map Join Operator [MAPJOIN_105]
                                                    |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                   |  HybridGraceHashJoin:true
                                                    |  keys:{"Map 3":"_col1 (type: string)","Map 6":"_col3 (type: string)"}
                                                    |  outputColumnNames:["_col2","_col3","_col4","_col6"]
                                                    |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
@@ -1780,6 +1789,7 @@ Stage-0
                |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                |           Map Join Operator [MAPJOIN_85]
                |           |  condition map:[{"":"Inner Join 0 to 1"}]
+               |           |  HybridGraceHashJoin:true
                |           |  keys:{"Reducer 11":"_col2 (type: string)","Map 14":"_col0 (type: string)"}
                |           |  outputColumnNames:["_col1","_col2"]
                |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -1800,6 +1810,7 @@ Stage-0
                |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |           |<-Map Join Operator [MAPJOIN_84]
                |              |  condition map:[{"":"Inner Join 0 to 1"}]
+               |              |  HybridGraceHashJoin:true
                |              |  keys:{"Reducer 11":"_col1 (type: string)","Map 13":"_col1 (type: string)"}
                |              |  outputColumnNames:["_col1","_col2"]
                |              |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
@@ -1880,6 +1891,7 @@ Stage-0
                            Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                            Map Join Operator [MAPJOIN_83]
                            |  condition map:[{"":"Inner Join 0 to 1"}]
+                           |  HybridGraceHashJoin:true
                            |  keys:{"Reducer 3":"_col2 (type: string)","Map 8":"_col0 (type: string)"}
                            |  outputColumnNames:["_col1","_col2"]
                            |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -1900,6 +1912,7 @@ Stage-0
                            |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                            |<-Map Join Operator [MAPJOIN_82]
                               |  condition map:[{"":"Inner Join 0 to 1"}]
+                              |  HybridGraceHashJoin:true
                               |  keys:{"Reducer 3":"_col1 (type: string)","Map 7":"_col1 (type: string)"}
                               |  outputColumnNames:["_col1","_col2"]
                               |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
@@ -2042,6 +2055,7 @@ Stage-0
                |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                |           Map Join Operator [MAPJOIN_167]
                |           |  condition map:[{"":"Inner Join 0 to 1"}]
+               |           |  HybridGraceHashJoin:true
                |           |  keys:{"Reducer 26":"_col2 (type: string)","Map 31":"_col0 (type: string)"}
                |           |  outputColumnNames:["_col2","_col5"]
                |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -2063,6 +2077,7 @@ Stage-0
                |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |           |<-Map Join Operator [MAPJOIN_166]
                |              |  condition map:[{"":"Inner Join 0 to 1"}]
+               |              |  HybridGraceHashJoin:true
                |              |  keys:{"Reducer 26":"_col1 (type: string)","Map 30":"_col1 (type: string)"}
                |              |  outputColumnNames:["_col2"]
                |              |  Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
@@ -2226,6 +2241,7 @@ Stage-0
                            |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                            |           Map Join Operator [MAPJOIN_165]
                            |           |  condition map:[{"":"Inner Join 0 to 1"}]
+                           |           |  HybridGraceHashJoin:true
                            |           |  keys:{"Reducer 15":"_col2 (type: string)","Map 19":"_col0 (type: string)"}
                            |           |  outputColumnNames:["_col2","_col5"]
                            |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -2247,6 +2263,7 @@ Stage-0
                            |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                            |           |<-Map Join Operator [MAPJOIN_164]
                            |              |  condition map:[{"":"Inner Join 0 to 1"}]
+                           |              |  HybridGraceHashJoin:true
                            |              |  keys:{"Reducer 15":"_col1 (type: string)","Map 18":"_col1 (type: string)"}
                            |              |  outputColumnNames:["_col2"]
                            |              |  Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
@@ -2361,6 +2378,7 @@ Stage-0
                                        Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                                        Map Join Operator [MAPJOIN_163]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
+                                       |  HybridGraceHashJoin:true
                                        |  keys:{"Reducer 3":"_col2 (type: string)","Map 10":"_col0 (type: string)"}
                                        |  outputColumnNames:["_col2","_col5"]
                                        |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -2382,6 +2400,7 @@ Stage-0
                                        |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                                        |<-Map Join Operator [MAPJOIN_162]
                                           |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |  HybridGraceHashJoin:true
                                           |  keys:{"Reducer 3":"_col1 (type: string)","Map 9":"_col1 (type: string)"}
                                           |  outputColumnNames:["_col2"]
                                           |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
@@ -3058,6 +3077,7 @@ Stage-0
          |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
          |        Map Join Operator [MAPJOIN_120]
          |        |  condition map:[{"":"Inner Join 0 to 1"}]
+         |        |  HybridGraceHashJoin:true
          |        |  keys:{"Map 17":"_col1 (type: string)","Map 18":"_col0 (type: string)"}
          |        |  outputColumnNames:["_col0","_col3"]
          |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -3070,6 +3090,7 @@ Stage-0
          |        |     value expressions:_col0 (type: string), _col3 (type: string)
          |        |     Map Join Operator [MAPJOIN_119]
          |        |     |  condition map:[{"":"Inner Join 0 to 1"}]
+         |        |     |  HybridGraceHashJoin:true
          |        |     |  keys:{"Map 16":"_col0 (type: string)","Map 17":"_col0 (type: string)"}
          |        |     |  outputColumnNames:["_col0","_col1","_col3"]
          |        |     |  Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
@@ -3138,6 +3159,7 @@ Stage-0
          |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
          |        Map Join Operator [MAPJOIN_120]
          |        |  condition map:[{"":"Inner Join 0 to 1"}]
+         |        |  HybridGraceHashJoin:true
          |        |  keys:{"Map 17":"_col1 (type: string)","Map 19":"_col0 (type: string)"}
          |        |  outputColumnNames:["_col0","_col3"]
          |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -3161,6 +3183,7 @@ Stage-0
          |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
          |        Map Join Operator [MAPJOIN_120]
          |        |  condition map:[{"":"Inner Join 0 to 1"}]
+         |        |  HybridGraceHashJoin:true
          |        |  keys:{"Map 17":"_col1 (type: string)","Map 20":"_col0 (type: string)"}
          |        |  outputColumnNames:["_col0","_col3"]
          |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -3184,6 +3207,7 @@ Stage-0
          |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
          |        Map Join Operator [MAPJOIN_120]
          |        |  condition map:[{"":"Inner Join 0 to 1"}]
+         |        |  HybridGraceHashJoin:true
          |        |  keys:{"Map 17":"_col1 (type: string)","Map 21":"_col0 (type: string)"}
          |        |  outputColumnNames:["_col0","_col3"]
          |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -3337,6 +3361,7 @@ Stage-0
                      |     Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
                      |     Map Join Operator [MAPJOIN_115]
                      |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                     |     |  HybridGraceHashJoin:true
                      |     |  keys:{"Map 1":"_col0 (type: string)","Map 6":"_col1 (type: string)"}
                      |     |  outputColumnNames:["_col1"]
                      |     |  Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
@@ -3380,6 +3405,7 @@ Stage-0
                            Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
                            Map Join Operator [MAPJOIN_115]
                            |  condition map:[{"":"Inner Join 0 to 1"}]
+                           |  HybridGraceHashJoin:true
                            |  keys:{"Map 5":"_col0 (type: string)","Map 6":"_col1 (type: string)"}
                            |  outputColumnNames:["_col1"]
                            |  Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
@@ -3474,6 +3500,7 @@ Stage-0
                |           Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
                |           Map Join Operator [MAPJOIN_164]
                |           |  condition map:[{"":"Inner Join 0 to 1"}]
+               |           |  HybridGraceHashJoin:true
                |           |  keys:{"Map 24":"_col1 (type: string)","Reducer 31":"_col1 (type: string)"}
                |           |  outputColumnNames:["_col0","_col3"]
                |           |  Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
@@ -3486,6 +3513,7 @@ Stage-0
                |           |     value expressions:_col0 (type: string), _col3 (type: string)
                |           |     Map Join Operator [MAPJOIN_163]
                |           |     |  condition map:[{"":"Inner Join 0 to 1"}]
+               |           |     |  HybridGraceHashJoin:true
                |           |     |  keys:{"Map 23":"_col0 (type: string)","Map 24":"_col0 (type: string)"}
                |           |     |  outputColumnNames:["_col0","_col1","_col3"]
                |           |     |  Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
@@ -3832,6 +3860,7 @@ Stage-0
                                              Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
                                              Map Join Operator [MAPJOIN_159]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
+                                             |  HybridGraceHashJoin:true
                                              |  keys:{"Reducer 3":"_col1 (type: string)","Map 10":"_col1 (type: string)"}
                                              |  outputColumnNames:["_col2"]
                                              |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
@@ -3997,6 +4026,7 @@ Stage-5
                      |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
                      |        Map Join Operator [MAPJOIN_108]
                      |        |  condition map:[{"":"Inner Join 0 to 1"}]
+                     |        |  HybridGraceHashJoin:true
                      |        |  keys:{"Map 21":"_col1 (type: string)","Map 16":"_col1 (type: string)"}
                      |        |  outputColumnNames:["_col0","_col6"]
                      |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -4009,6 +4039,7 @@ Stage-5
                      |        |     value expressions:_col0 (type: string), _col6 (type: string)
                      |        |     Map Join Operator [MAPJOIN_105]
                      |        |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                     |        |     |  HybridGraceHashJoin:true
                      |        |     |  keys:{"Map 20":"key (type: string)","Map 21":"key (type: string)"}
                      |        |     |  outputColumnNames:["_col0","_col1","_col6"]
                      |        |     |  Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
@@ -4081,6 +4112,7 @@ Stage-5
                      |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
                      |        Map Join Operator [MAPJOIN_108]
                      |        |  condition map:[{"":"Inner Join 0 to 1"}]
+                     |        |  HybridGraceHashJoin:true
                      |        |  keys:{"Map 21":"_col1 (type: string)","Map 17":"_col1 (type: string)"}
                      |        |  outputColumnNames:["_col0","_col6"]
                      |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -4114,6 +4146,7 @@ Stage-5
                      |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
                      |        Map Join Operator [MAPJOIN_108]
                      |        |  condition map:[{"":"Inner Join 0 to 1"}]
+                     |        |  HybridGraceHashJoin:true
                      |        |  keys:{"Map 21":"_col1 (type: string)","Map 18":"_col1 (type: string)"}
                      |        |  outputColumnNames:["_col0","_col6"]
                      |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -4147,6 +4180,7 @@ Stage-5
                      |        Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
                      |        Map Join Operator [MAPJOIN_108]
                      |        |  condition map:[{"":"Inner Join 0 to 1"}]
+                     |        |  HybridGraceHashJoin:true
                      |        |  keys:{"Map 21":"_col1 (type: string)","Map 19":"_col1 (type: string)"}
                      |        |  outputColumnNames:["_col0","_col6"]
                      |        |  Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
@@ -4299,6 +4333,7 @@ Stage-5
                               |     value expressions:_col0 (type: string), _col6 (type: string)
                               |     Map Join Operator [MAPJOIN_103]
                               |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                              |     |  HybridGraceHashJoin:true
                               |     |  keys:{"Map 6":"key (type: string)","Map 7":"key (type: string)"}
                               |     |  outputColumnNames:["_col0","_col1","_col6"]
                               |     |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
@@ -4511,6 +4546,7 @@ Stage-5
                            |           Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
                            |           Map Join Operator [MAPJOIN_160]
                            |           |  condition map:[{"":"Inner Join 0 to 1"}]
+                           |           |  HybridGraceHashJoin:true
                            |           |  keys:{"Map 34":"_col1 (type: string)","Reducer 29":"_col1 (type: string)"}
                            |           |  outputColumnNames:["_col0","_col6"]
                            |           |  Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
@@ -4523,6 +4559,7 @@ Stage-5
                            |           |     value expressions:_col0 (type: string), _col6 (type: string)
                            |           |     Map Join Operator [MAPJOIN_157]
                            |           |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                           |           |     |  HybridGraceHashJoin:true
                            |           |     |  keys:{"Map 33":"key (type: string)","Map 34":"key (type: string)"}
                            |           |     |  outputColumnNames:["_col0","_col1","_col6"]
                            |           |     |  Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
@@ -4843,6 +4880,7 @@ Stage-5
                                                    |     value expressions:_col0 (type: string), _col6 (type: string)
                                                    |     Map Join Operator [MAPJOIN_155]
                                                    |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                   |     |  HybridGraceHashJoin:true
                                                    |     |  keys:{"Map 10":"key (type: string)","Map 11":"key (type: string)"}
                                                    |     |  outputColumnNames:["_col0","_col1","_col6"]
                                                    |     |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index 4f69b3b..880d2ad 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -1,3 +1,55 @@
+PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_vectorized
+POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: explain select a, b from acid_vectorized order by a, b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, b from acid_vectorized order by a, b
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+
+Stage-0
+   Fetch Operator
+      limit:-1
+      Stage-1
+         Reducer 2 vectorized
+         File Output Operator [FS_8]
+            compressed:false
+            Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+            Select Operator [OP_7]
+            |  outputColumnNames:["_col0","_col1"]
+            |  Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
+            |<-Map 1 [SIMPLE_EDGE] vectorized
+               Reduce Output Operator [RS_6]
+                  key expressions:_col0 (type: int), _col1 (type: string)
+                  sort order:++
+                  Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator [OP_5]
+                     outputColumnNames:["_col0","_col1"]
+                     Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
+                     TableScan [TS_0]
+                        ACID table:true
+                        alias:acid_vectorized
+                        Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain select key, value
 FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
 PREHOOK: type: QUERY
@@ -448,15 +500,15 @@ Stage-0
    Fetch Operator
       limit:5
       Stage-1
-         Reducer 2
-         File Output Operator [FS_5]
+         Reducer 2 vectorized
+         File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Limit [LIM_4]
+            Limit [LIM_7]
                Number of rows:5
                Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-               Select Operator [SEL_3]
+               Select Operator [OP_6]
                |  outputColumnNames:["_col0","_col1"]
                |  Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
@@ -506,15 +558,15 @@ Stage-3
                         Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
                            Conditional Operator
                               Stage-1
-                                 Map 1
-                                 File Output Operator [FS_3]
+                                 Map 1 vectorized
+                                 File Output Operator [FS_10]
                                     compressed:false
                                     Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
                                     table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5"}
-                                    Select Operator [SEL_2]
+                                    Select Operator [OP_9]
                                        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                                        Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
-                                       Filter Operator [FIL_4]
+                                       Filter Operator [FIL_8]
                                           predicate:(userid <= 13) (type: boolean)
                                           Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
                                           TableScan [TS_0]
@@ -539,3 +591,165 @@ POSTHOOK: query: drop table orc_merge5
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@orc_merge5
 POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+Plan not optimized by CBO due to missing statistics. Please check log for more details.
+
+Vertex dependency in root stage
+Map 2 <- Map 1 (CUSTOM_EDGE)
+
+Stage-0
+   Fetch Operator
+      limit:-1
+      Stage-1
+         Map 2
+         File Output Operator [FS_8]
+            compressed:false
+            Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+            Select Operator [SEL_7]
+               outputColumnNames:["_col0","_col1","_col2"]
+               Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+               Map Join Operator [MAPJOIN_13]
+               |  BucketMapJoin:true
+               |  condition map:[{"":"Inner Join 0 to 1"}]
+               |  HybridGraceHashJoin:true
+               |  keys:{"Map 1":"key (type: int)","Map 2":"key (type: int)"}
+               |  outputColumnNames:["_col0","_col1","_col7"]
+               |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+               |<-Map 1 [CUSTOM_EDGE]
+               |  Reduce Output Operator [RS_3]
+               |     key expressions:key (type: int)
+               |     Map-reduce partition columns:key (type: int)
+               |     sort order:+
+               |     Statistics:Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+               |     value expressions:value (type: string)
+               |     Filter Operator [FIL_11]
+               |        predicate:key is not null (type: boolean)
+               |        Statistics:Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+               |        TableScan [TS_0]
+               |           alias:a
+               |           Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+               |<-Filter Operator [FIL_12]
+                     predicate:key is not null (type: boolean)
+                     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                     TableScan [TS_1]
+                        alias:b
+                        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
index 9718871..1d84e3b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
@@ -46,7 +46,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_7]
             compressed:false
             Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE
@@ -55,7 +55,7 @@ Stage-0
             |  aggregations:["max(VALUE._col0)","max(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-            |<-Map 1 [SIMPLE_EDGE]
+            |<-Map 1 [SIMPLE_EDGE] vectorized
                Reduce Output Operator [RS_4]
                   sort order:
                   Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
index 480c4e1..cb6de24 100644
--- a/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
@@ -63,7 +63,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_13]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -146,7 +146,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 3
+         Reducer 3 vectorized
          File Output Operator [FS_18]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
@@ -155,7 +155,7 @@ Stage-0
             |  aggregations:["count(VALUE._col0)"]
             |  outputColumnNames:["_col0"]
             |  Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            |<-Reducer 2 [SIMPLE_EDGE]
+            |<-Reducer 2 [SIMPLE_EDGE] vectorized
                Reduce Output Operator [RS_15]
                   sort order:
                   Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
@@ -289,7 +289,7 @@ Stage-0
                |  keys:{"0":"_col0 (type: int)","1":"_col0 (type: int)"}
                |  outputColumnNames:["_col0","_col1","_col3"]
                |  Statistics:Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 2 [SIMPLE_EDGE]
+               |<-Reducer 2 [SIMPLE_EDGE] vectorized
                |  Reduce Output Operator [RS_51]
                |     key expressions:_col0 (type: int)
                |     Map-reduce partition columns:_col0 (type: int)
@@ -331,7 +331,7 @@ Stage-0
                |                    TableScan [TS_0]
                |                       alias:a
                |                       Statistics:Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 6 [SIMPLE_EDGE]
+               |<-Reducer 6 [SIMPLE_EDGE] vectorized
                   Reduce Output Operator [RS_53]
                      key expressions:_col0 (type: int)
                      Map-reduce partition columns:_col0 (type: int)
@@ -445,7 +445,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -543,7 +543,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -662,7 +662,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_20]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -774,7 +774,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -863,7 +863,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 3
+         Reducer 3 vectorized
          File Output Operator [FS_14]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -885,7 +885,7 @@ Stage-0
                      |  condition map:[{"":"Inner Join 0 to 1"}]
                      |  keys:{"0":"_col0 (type: int)","1":"_col0 (type: int)"}
                      |  Statistics:Num rows: 5 Data size: 511 Basic stats: COMPLETE Column stats: NONE
-                     |<-Map 1 [SIMPLE_EDGE]
+                     |<-Map 1 [SIMPLE_EDGE] vectorized
                      |  Reduce Output Operator [RS_22]
                      |     key expressions:_col0 (type: int)
                      |     Map-reduce partition columns:_col0 (type: int)
@@ -900,7 +900,7 @@ Stage-0
                      |           TableScan [TS_0]
                      |              alias:a
                      |              Statistics:Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                     |<-Map 4 [SIMPLE_EDGE]
+                     |<-Map 4 [SIMPLE_EDGE] vectorized
                         Reduce Output Operator [RS_25]
                            key expressions:_col0 (type: int)
                            Map-reduce partition columns:_col0 (type: int)
@@ -958,7 +958,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_14]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -1045,7 +1045,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_21]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -1161,7 +1161,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 2
+         Reducer 2 vectorized
          File Output Operator [FS_17]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -1448,7 +1448,7 @@ Stage-4
             Stage-3
                Dependency Collection{}
                   Stage-2
-                     Reducer 2
+                     Reducer 2 vectorized
                      File Output Operator [FS_25]
                         compressed:false
                         Statistics:Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out b/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
index a7ff528..0cb2270 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
@@ -249,7 +249,7 @@ Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 3
+         Reducer 3 vectorized
          File Output Operator [FS_10]
             compressed:false
             Statistics:Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE


[38/55] [abbrv] hive git commit: HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)

Posted by jx...@apache.org.
HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/11f5d449
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/11f5d449
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/11f5d449

Branch: refs/heads/master-fixed
Commit: 11f5d4495d0e7de772b765dfeb64206ff0ebc1d4
Parents: 6eaad6b
Author: Jimmy Xiang <jx...@apache.org>
Authored: Mon Nov 2 13:43:56 2015 -0800
Committer: Jimmy Xiang <jx...@apache.org>
Committed: Wed Nov 4 07:24:48 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/hooks/LineageLogger.java     |  1 +
 .../clientpositive/cbo_rp_lineage2.q.out        | 68 +++++++++---------
 .../test/results/clientpositive/lineage2.q.out  | 72 ++++++++++----------
 .../test/results/clientpositive/lineage3.q.out  | 60 ++++++++--------
 4 files changed, 101 insertions(+), 100 deletions(-)
----------------------------------------------------------------------
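For readers tracing the change before the diffs: below is a minimal, standalone sketch of the header the patched hook writes, with the new "database" key landing between "engine" and "hash" (matching the .q.out updates that follow). The class name and all values are illustrative placeholders, not Hive API calls; the real hook pulls the engine from HiveConf and the database from the session state.

  import com.google.gson.stream.JsonWriter;
  import java.io.StringWriter;

  public class LineageHeaderSketch {
    public static void main(String[] args) throws Exception {
      StringWriter out = new StringWriter();
      JsonWriter writer = new JsonWriter(out);
      writer.beginObject();
      writer.name("version").value("1.0");
      writer.name("engine").value("mr");           // HIVE_EXECUTION_ENGINE in the real hook
      writer.name("database").value("default");    // the field this commit adds
      writer.name("hash").value("<query hash>");
      writer.name("queryText").value("select 1");
      writer.endObject();
      writer.close();
      // Prints: {"version":"1.0","engine":"mr","database":"default","hash":"<query hash>","queryText":"select 1"}
      System.out.println(out);
    }
  }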


http://git-wip-us.apache.org/repos/asf/hive/blob/11f5d449/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index 1146cae..178a2de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -169,6 +169,7 @@ public class LineageLogger implements ExecuteWithHookContext {
         }
         writer.name("engine").value(
           HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE));
+        writer.name("database").value(ss.getCurrentDatabase());
         writer.name("hash").value(getQueryHash(queryStr));
         writer.name("queryText").value(queryStr);
 

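One practical consequence of the hunk above: lineage entries written before this commit have no "database" key, so anything consuming these log lines should read it defensively. A hedged sketch of such a reader using Gson's tree API (class and method names here are hypothetical; only the JSON field names come from the diff):

  import com.google.gson.JsonObject;
  import com.google.gson.JsonParser;

  public class LineageEntryReader {
    // Returns the database recorded in a lineage log line, or null for
    // entries written before this field existed.
    static String databaseOf(String jsonLine) {
      JsonObject entry = new JsonParser().parse(jsonLine).getAsJsonObject();
      return entry.has("database") ? entry.get("database").getAsString() : null;
    }

    public static void main(String[] args) {
      String line = "{\"version\":\"1.0\",\"engine\":\"mr\",\"database\":\"default\"}";
      System.out.println(databaseOf(line));  // prints: default
    }
  }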
http://git-wip-us.apache.org/repos/asf/hive/blob/11f5d449/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
index 9fc1e7b..41f3d09 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
@@ -5,12 +5,12 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@src2
-{"version":"1.0","engine":"mr","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: select * from src1 where key is not null and value is not null limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[],"vertices":[]}
 238	val_238
 	
 311	val_311
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 150	val_150
 213	val_213
@@ -31,17 +31,17 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: insert into table dest1 select * from src2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select key k, dest1.value from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	val_238
 	
 311	val_311
@@ -97,7 +97,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -119,7 +119,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -140,7 +140,7 @@ PREHOOK: query: select key, count(1) a from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[],"vertices":[]}
 	20
 128	2
 146	2
@@ -161,7 +161,7 @@ PREHOOK: query: select key k, count(*) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[],"vertices":[]}
 	20
 128	2
 146	2
@@ -182,7 +182,7 @@ PREHOOK: query: select key k, count(value) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[],"vertices":[]}
 	20
 128	2
 146	2
@@ -203,7 +203,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[],"vertices":[]}
 	3
 val_146	3
 val_150	3
@@ -227,7 +227,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value order b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
 	3
 val_146	3
 val_150	3
@@ -237,7 +237,7 @@ PREHOOK: query: select key, length(value) from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	7
 	0
 311	7
@@ -292,7 +292,7 @@ PREHOOK: query: select length(value) + 3 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 10
 3
 10
@@ -347,7 +347,7 @@ PREHOOK: query: select 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
 5
 5
 5
@@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]}
 15
 15
 15
@@ -461,31 +461,31 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.val
 ue2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertex
 Id":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.s
 rc2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN",
 "vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.v
 alue2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vert
 exId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2
   select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"i
 d":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"de
 fault.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select * from src1 where length(key) > 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[],"vertices":[]}
 238	val_238
 311	val_311
 255	val_255
@@ -503,7 +503,7 @@ PREHOOK: query: select * from src1 where length(key) > 2 and value > 'a'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[],"vertices":[]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[],"vertices":[]}
 238	val_238
 311	val_311
 255	val_255
@@ -523,14 +523,14 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest3
-{"version":"1.0","engine":"mr","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"
 },{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId"
 :"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2
   select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1
 .value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","ve
 rtexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: drop table if exists dest_l1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE
@@ -552,7 +552,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
 PREHOOK: Output: default@dest_l1
-{"version":"1.0","engine":"mr","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery1:j-subquery1:p1.key = null-subquery1:j-subquery1:t1.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery2:j-subquery2:p2.key = null-subquery2:j-subquery2:t2.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUM
 N","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery1:j-subquery1:p1.key = null-subquery1:j-subquery1:t1.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(null-subquery2:j-subquery2:p2.key = null-subquery2:j-subquery2:t2.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":
 2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
 PREHOOK: query: drop table if exists emp
 PREHOOK: type: DROPTABLE
 PREHOOK: query: drop table if exists dept
@@ -593,7 +593,7 @@ PREHOOK: Input: default@dept
 PREHOOK: Input: default@emp
 PREHOOK: Input: default@project
 PREHOOK: Output: default@tgt
-{"version":"1.0","engine":"mr","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PREDICATE"},{"sources":[1
 1,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PRED
 ICATE"},{"sources":[11,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
 PREHOOK: query: drop table if exists dest_l2
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile
@@ -604,7 +604,7 @@ PREHOOK: query: insert into dest_l2 values(0, 1, 100, 10000)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@dest_l2
-{"version":"1.0","engine":"mr","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 PREHOOK: query: select * from (
   select c1 + c2 x from dest_l2
   union all
@@ -612,7 +612,7 @@ PREHOOK: query: select * from (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"KEY.reducesinkkey0","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 101
 10000
 PREHOOK: query: drop table if exists dest_l3
@@ -625,7 +625,7 @@ PREHOOK: query: insert into dest_l3 values(0, "s1", "s2", 15)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__2
 PREHOOK: Output: default@dest_l3
-{"version":"1.0","engine":"mr","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: select sum(a.c1) over (partition by a.c1 order by a.id)
 from dest_l2 a
 where a.c2 != 10
@@ -634,7 +634,7 @@ having count(a.c2) > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"$win$_col_0","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"$win$_col_0","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]}
 1
 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3
 from dest_l2 a join dest_l3 b on (a.id = b.id)
@@ -646,7 +646,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUM
 N","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":
 1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
 1	1	s2	15
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
@@ -659,7 +659,7 @@ PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t
-{"version":"1.0","engine":"mr","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
 concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
@@ -667,7 +667,7 @@ GROUP BY substr(src1.key,1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT KEY._col1:0._col0)","edgeType":"PROJECTION"},{"sources":[3,5],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"_c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT KEY._col1:0._col0)","edgeType":"PROJECTION"},{"sources":[3,5],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"_c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 	7	1543.0
 1	3	1296.0
 2	6	21257.0


[36/55] [abbrv] hive git commit: HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)

Posted by jx...@apache.org.
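
For context, the hunks in this patch update the lineage golden files to match the new hook output: each lineage record now carries a top-level "database" field naming the current database, placed between the existing "engine" and "hash" fields. A minimal before/after sketch of one record (values illustrative, the remaining fields elided):

-{"version":"1.0","engine":"mr","hash":"...","queryText":"...","edges":[...],"vertices":[...]}
+{"version":"1.0","engine":"mr","database":"default","hash":"...","queryText":"...","edges":[...],"vertices":[...]}
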
http://git-wip-us.apache.org/repos/asf/hive/blob/11f5d449/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index ad965c8..fb5e9df 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -10,7 +10,7 @@ insert into table d1 select x + length(y)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@d1
-{"version":"1.0","engine":"mr","hash":"4c9b7b8d89403cef78668f15d393e542","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x + length(y)","edges":[{"sources":[1,2],"targets":[0],"expression":"(UDFToInteger(a.ctinyint) + length(a.cstring1))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4c9b7b8d89403cef78668f15d393e542","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x + length(y)","edges":[{"sources":[1,2],"targets":[0],"expression":"(UDFToInteger(a.ctinyint) + length(a.cstring1))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: drop table if exists d2
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table d2(b varchar(128))
@@ -25,7 +25,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@d1
 PREHOOK: Output: default@d2
-{"version":"1.0","engine":"mr","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint
 "},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"def
 ault.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table t as
@@ -36,7 +36,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t
-{"version":"1.0","engine":"mr","hash":"761b3a1f405d8e719d3f0c9147b57a23","queryText":"create table t as\nselect * from\n  (select * from\n     (select key from src1 limit 1) v1) v2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"761b3a1f405d8e719d3f0c9147b57a23","queryText":"create table t as\nselect * from\n  (select * from\n     (select key from src1 limit 1) v1) v2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"}]}
 PREHOOK: query: drop table if exists dest_l1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table dest_l1(a int, b varchar(128))
@@ -51,7 +51,7 @@ where cint is not null and cint < 0 order by cint, cs limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@dest_l1@ds=today
-{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
 PREHOOK: query: insert into table dest_l1 partition (ds='tomorrow')
 select min(cint), cast(min(cstring1) as varchar(128)) as cs
 from alltypesorc
@@ -61,13 +61,13 @@ having min(cbigint) > 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@dest_l1@ds=tomorrow
-{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},
 {"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"defaul
 t.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: select cint, rank() over(order by cint) from alltypesorc
 where cint > 10 and cint < 10000 limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.a
 lltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN",
 "vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
 762	1
 762	1
 762	1
@@ -86,7 +86,7 @@ order by a.ctinyint, a.cint
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"40c3faa7abd1cdb7f12c1047a8a1d2ce","queryText":"select a.ctinyint, a.cint, count(a.cdouble)\n  over(partition by a.ctinyint order by a.cint desc\n    rows between 1 preceding and 1 following)\nfrom alltypesorc a inner join alltypesorc b on a.cint = b.cbigint\norder by a.ctinyint, a.cint","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[3,4,5,6],"targets":[2],"expression":"(tok_function count (. (tok_table_or_col $hdt$_0) cdouble) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) ctinyint)) (tok_orderby (tok_tabsortcolnamedesc (. (tok_table_or_col $hdt$_0) cint)))) (tok_windowrange (preceding 1) (following 1))))","edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":1,"vertexType":"CO
 LUMN","vertexId":"a.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"40c3faa7abd1cdb7f12c1047a8a1d2ce","queryText":"select a.ctinyint, a.cint, count(a.cdouble)\n  over(partition by a.ctinyint order by a.cint desc\n    rows between 1 preceding and 1 following)\nfrom alltypesorc a inner join alltypesorc b on a.cint = b.cbigint\norder by a.ctinyint, a.cint","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[3,4,5,6],"targets":[2],"expression":"(tok_function count (. (tok_table_or_col $hdt$_0) cdouble) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) ctinyint)) (tok_orderby (tok_tabsortcolnamedesc (. (tok_table_or_col $hdt$_0) cint)))) (tok_windowrange (preceding 1) (following 1))))","edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"i
 d":1,"vertexType":"COLUMN","vertexId":"a.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: with v2 as
   (select cdouble, count(cint) over() a,
     sum(cint + cbigint) over(partition by cboolean1) b
@@ -97,7 +97,7 @@ order by cdouble, a, b limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a083a5322b6a83af6f614f299d0361e4","queryText":"with v2 as\n  (select cdouble, count(cint) over() a,\n    sum(cint + cbigint) over(partition by cboolean1) b\n    from (select * from alltypesorc) v1)\nselect cdouble, a, b, a + b, cdouble + a from v2\nwhere cdouble is not null\norder by cdouble, a, b limit 5","edges":[{"sources":[5],"targets":[0],"edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[1],"expression":"(tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[2],"expression":"(tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (
 tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[3],"expression":"((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647)))) + (tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647)))))","edgeType":"PROJECTION"},{"sources":[5,6,7,8,9,10,11,12,13,14,15,16],"targets":[4],"expressio
 n":"(alltypesorc.cdouble + UDFToDouble((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))))","edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3,4],"expression":"alltypesorc.cdouble is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cdouble"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"b"},{"id":3,"vertexType":"COLUMN","vertexId":"c3"},{"id":4,"vertexType":"COLUMN","vertexId":"c4"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltype
 sorc.cbigint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":14,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":15,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":16,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a083a5322b6a83af6f614f299d0361e4","queryText":"with v2 as\n  (select cdouble, count(cint) over() a,\n    sum(cint + cbigint) over(partition by cboolean1) b\n    from (select * from alltypesorc) v1)\nselect cdouble, a, b, a + b, cdouble + a from v2\nwhere cdouble is not null\norder by cdouble, a, b limit 5","edges":[{"sources":[5],"targets":[0],"edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[1],"expression":"(tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[2],"expression":"(tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (
 tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[3],"expression":"((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647)))) + (tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647)))))","edgeType":"PROJECTION"},{"sources":[5,6,7,8,9,10,11,12,13,14,15,16],"ta
 rgets":[4],"expression":"(alltypesorc.cdouble + UDFToDouble((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))))","edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3,4],"expression":"alltypesorc.cdouble is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cdouble"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"b"},{"id":3,"vertexType":"COLUMN","vertexId":"c3"},{"id":4,"vertexType":"COLUMN","vertexId":"c4"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":9,"vertexType":"COLUMN","verte
 xId":"default.alltypesorc.cbigint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":14,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":15,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":16,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]}
 -16379.0	9173	-919551973060	-919551963887	-7206.0
 -16373.0	9173	-919551973060	-919551963887	-7200.0
 -16372.0	9173	-919551973060	-919551963887	-7199.0
@@ -116,7 +116,7 @@ order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2,3],"expression":"alltypesorc.cbigint is not null","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint < 100)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = alltypesorc.ctinyint
 )","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2,3],"expression":"alltypesorc.cbigint is not null","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint < 100)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint =
  alltypesorc.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1067683781	-51
@@ -135,7 +135,7 @@ and x.ctinyint + length(c.cstring2) < 1000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3a12ad24b2622a8958df12d0bdc60f8a","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n   select a.ctinyint ctinyint, b.cint cint\n   from (select * from alltypesorc a where cboolean1=false) a\n   join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - UDFToLong(100))","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(UDFToDouble(c.cint) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = c.cint)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((c.cbigint - UDFToLong(224870380)) =
  UDFToLong(c.cint))","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(c.cboolean1 = false)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(c.ctinyint > 10)","edgeType":"PREDICATE"},{"sources":[4,9],"targets":[0,1,2,3],"expression":"((UDFToInteger(c.ctinyint) + length(c.cstring2)) < 1000)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":9,"vertexType":"COLUMN","vertexId":"default
 .alltypesorc.cstring2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a12ad24b2622a8958df12d0bdc60f8a","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n   select a.ctinyint ctinyint, b.cint cint\n   from (select * from alltypesorc a where cboolean1=false) a\n   join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - UDFToLong(100))","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(UDFToDouble(c.cint) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = c.cint)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((c.cbigint - UD
 FToLong(224870380)) = UDFToLong(c.cint))","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(c.cboolean1 = false)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(c.ctinyint > 10)","edgeType":"PREDICATE"},{"sources":[4,9],"targets":[0,1,2,3],"expression":"((UDFToInteger(c.ctinyint) + length(c.cstring2)) < 1000)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":9,"vertexType":"COLUMN
 ","vertexId":"default.alltypesorc.cstring2"}]}
 11	-654374827	857266369	OEfPnHnIYueoup
 PREHOOK: query: select c1, x2, x3
 from (
@@ -158,7 +158,7 @@ order by x2, c1 desc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bc64f8bec21631969a17930ec609cde9","queryText":"select c1, x2, x3\nfrom (\n  select c1, min(c2) x2, sum(c3) x3\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cbigint) c3\n      from alltypesorc\n      where cint is not null\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 5\n    ) x\n  ) x2\n  group by c1\n) y\nwhere x2 > 0\norder by x2, c1 desc","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"expression":"sum(min(default.alltypesorc.cbigint))","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1,2],"expression":"alltypesorc.cint is not null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2],"expression":"(min(default.alltypesorc.ctinyint) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c1"},
 {"id":1,"vertexType":"COLUMN","vertexId":"x2"},{"id":2,"vertexType":"COLUMN","vertexId":"x3"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bc64f8bec21631969a17930ec609cde9","queryText":"select c1, x2, x3\nfrom (\n  select c1, min(c2) x2, sum(c3) x3\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cbigint) c3\n      from alltypesorc\n      where cint is not null\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 5\n    ) x\n  ) x2\n  group by c1\n) y\nwhere x2 > 0\norder by x2, c1 desc","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"expression":"sum(min(default.alltypesorc.cbigint))","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1,2],"expression":"alltypesorc.cint is not null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2],"expression":"(min(default.alltypesorc.ctinyint) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLU
 MN","vertexId":"c1"},{"id":1,"vertexType":"COLUMN","vertexId":"x2"},{"id":2,"vertexType":"COLUMN","vertexId":"x3"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 -1072910839	11	2048385991
 -1073279343	11	-1595604468
 PREHOOK: query: select key, value from src1
@@ -166,7 +166,7 @@ where key in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 273	val_273
 PREHOOK: query: select * from src1 a
@@ -178,7 +178,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 311	val_311
 Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select key, value from src1
@@ -186,7 +186,7 @@ where key not in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"
 default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":
 "COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]}
 PREHOOK: query: select * from src1 a
 where not exists
   (select cint from alltypesorc b
@@ -196,7 +196,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src
 1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","v
 ertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 369	
 401	val_401
 406	val_406
@@ -205,7 +205,7 @@ select x, y from t where y > 'v' order by x, y limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"d40d80b93db06c12df9a6ccdc108a9d1","queryText":"with t as (select key x, value y from src1 where key > '2')\nselect x, y from t where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"d40d80b93db06c12df9a6ccdc108a9d1","queryText":"with t as (select key x, value y from src1 where key > '2')\nselect x, y from t where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 213	val_213
 238	val_238
 255	val_255
@@ -216,7 +216,7 @@ select x, y where y > 'v' order by x, y limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"9180b71a610dbcf5e636a3c03e48ca3b","queryText":"from (select key x, value y from src1 where key > '2') t\nselect x, y where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"9180b71a610dbcf5e636a3c03e48ca3b","queryText":"from (select key x, value y from src1 where key > '2') t\nselect x, y where y > 'v' order by x, y limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(src1.key > '2')","edgeType":"PREDICATE"},{"sources":[3],"targets":[0,1],"expression":"(src1.value > 'v')","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x"},{"id":1,"vertexType":"COLUMN","vertexId":"y"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 213	val_213
 238	val_238
 255	val_255
@@ -230,13 +230,13 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v1
-{"version":"1.0","engine":"mr","hash":"a3b2d2665c90fd669400f247f751f081","queryText":"create view dest_v1 as\n  select ctinyint, cint from alltypesorc where ctinyint is not null","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a3b2d2665c90fd669400f247f751f081","queryText":"create view dest_v1 as\n  select ctinyint, cint from alltypesorc where ctinyint is not null","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 PREHOOK: query: select * from dest_v1 order by ctinyint, cint limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"75e07b246069a5541af4a3983500b439","queryText":"select * from dest_v1 order by ctinyint, cint limit 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"75e07b246069a5541af4a3983500b439","queryText":"select * from dest_v1 order by ctinyint, cint limit 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v1.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 -64	NULL
 -64	NULL
 PREHOOK: query: alter view dest_v1 as select ctinyint from alltypesorc
@@ -244,14 +244,14 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v1
-{"version":"1.0","engine":"mr","hash":"bcab8b0c498b0d94e0967170956392b6","queryText":"alter view dest_v1 as select ctinyint from alltypesorc","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bcab8b0c498b0d94e0967170956392b6","queryText":"alter view dest_v1 as select ctinyint from alltypesorc","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v1.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 PREHOOK: query: select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t
 where ctinyint > 10 order by ctinyint limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"b0192d4da86f4bef38fe7ab1fc607906","queryText":"select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t\nwhere ctinyint > 10 order by ctinyint limit 2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"},{"sources":[1],"targets":[0],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"},{"sources":[1],"targets":[0],"expression":"(alltypesorc.ctinyint > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"t.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b0192d4da86f4bef38fe7ab1fc607906","queryText":"select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t\nwhere ctinyint > 10 order by ctinyint limit 2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"},{"sources":[1],"targets":[0],"expression":"alltypesorc.ctinyint is not null","edgeType":"PREDICATE"},{"sources":[1],"targets":[0],"expression":"(alltypesorc.ctinyint > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"t.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 11
 11
 PREHOOK: query: drop view if exists dest_v2
@@ -276,7 +276,7 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v2
-{"version":"1.0","engine":"mr","hash":"eda442b42b9c3a9cbdb7aff1984ad2dd","queryText":"create view dest_v2 (a, b) as select c1, x2\nfrom (\n  select c1, min(c2) x2\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cfloat) c3\n      from alltypesorc\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 1\n    ) x\n  ) x2\n  group by c1\n) y\norder by x2,c1 desc","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v2.c1"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v2.x2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"eda442b42b9c3a9cbdb7aff1984ad2dd","queryText":"create view dest_v2 (a, b) as select c1, x2\nfrom (\n  select c1, min(c2) x2\n  from (\n    select c1, c2, c3\n    from (\n      select cint c1, ctinyint c2, min(cfloat) c3\n      from alltypesorc\n      group by cint, ctinyint\n      order by cint, ctinyint\n      limit 1\n    ) x\n  ) x2\n  group by c1\n) y\norder by x2,c1 desc","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v2.c1"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v2.x2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 PREHOOK: query: drop view if exists dest_v3
 PREHOOK: type: DROPVIEW
 PREHOOK: query: create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as
@@ -297,7 +297,7 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v3
-{"version":"1.0","engine":"mr","hash":"a0c2481ce1c24895a43a950f93a10da7","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n  select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n  from alltypesorc c\n  join (\n     select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n           a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n     from ( select * from alltypesorc a where cboolean1=true ) a\n     join alltypesorc b on (a.csmallint = b.cint)\n   ) x on (x.ctinyint = c.cbigint)\n  where x.csmallint=11\n  and x.cint > 899\n  and x.cfloat > 4.5\n  and c.cstring1 < '7'\n  and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[7],"targets":[0],"edgeType":"PROJECTION"},{"sources":[8],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[9],"targets":[3],"edgeType":"PROJECTION"},{"sources":[10],"targets":[4],"edgeType":"PROJECTION"},{"sources":[11],"targets":[5],"edgeType":"PROJ
 ECTION"},{"sources":[12],"targets":[6],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(x._col1))","edgeType":"PREDICATE"},{"sources":[13],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 = true)","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,3,2,4,5,6],"expression":"(UDFToInteger(a._col1) = b.cint)","edgeType":"PREDICATE"},{"sources":[7,10,11,12],"targets":[0,1,3,2,4,5,6],"expression":"((x.csmallint = 11) and (x.cint > 899) and (x.cfloat > 4.5) and (c.cstring1 < '7') and (((x.cint + x.cfloat) + length(c.cstring1)) < 1000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{"id":5,"vertexType":"
 COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a0c2481ce1c24895a43a950f93a10da7","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n  select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n  from alltypesorc c\n  join (\n     select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n           a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n     from ( select * from alltypesorc a where cboolean1=true ) a\n     join alltypesorc b on (a.csmallint = b.cint)\n   ) x on (x.ctinyint = c.cbigint)\n  where x.csmallint=11\n  and x.cint > 899\n  and x.cfloat > 4.5\n  and c.cstring1 < '7'\n  and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[7],"targets":[0],"edgeType":"PROJECTION"},{"sources":[8],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[9],"targets":[3],"edgeType":"PROJECTION"},{"sources":[10],"targets":[4],"edgeType":"PROJECTION"},{"sources":[11],"targets"
 :[5],"edgeType":"PROJECTION"},{"sources":[12],"targets":[6],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(x._col1))","edgeType":"PREDICATE"},{"sources":[13],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 = true)","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,3,2,4,5,6],"expression":"(UDFToInteger(a._col1) = b.cint)","edgeType":"PREDICATE"},{"sources":[7,10,11,12],"targets":[0,1,3,2,4,5,6],"expression":"((x.csmallint = 11) and (x.cint > 899) and (x.cfloat > 4.5) and (c.cstring1 < '7') and (((x.cint + x.cfloat) + length(c.cstring1)) < 1000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{
 "id":5,"vertexType":"COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]}
 PREHOOK: query: alter view dest_v3 as
   select * from (
     select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,
@@ -311,13 +311,13 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v3
-{"version":"1.0","engine":"mr","hash":"949093880975cc807ad1a8003e8a8c7c","queryText":"alter view dest_v3 as\n  select * from (\n    select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n      count(b.cstring1) x, b.cboolean1\n    from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n    where a.cboolean2 = true and b.cfloat > 0\n    group by a.ctinyint, a.csmallint, b.cboolean1\n    having count(a.cint) > 10\n    order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col a) csmallint))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[0,1,2],"expressio
 n":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"949093880975cc807ad1a8003e8a8c7c","queryText":"alter view dest_v3 as\n  select * from (\n    select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n      count(b.cstring1) x, b.cboolean1\n    from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n    where a.cboolean2 = true and b.cfloat > 0\n    group by a.ctinyint, a.csmallint, b.cboolean1\n    having count(a.cint) > 10\n    order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col a) csmallint))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"target
 s":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltyp
 esorc.cfloat"}]}
 PREHOOK: query: select * from dest_v3 limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.c
 int) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(
 default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
 38	216	false
 38	229	true
 PREHOOK: query: drop table if exists src_dp
@@ -348,22 +348,22 @@ PREHOOK: query: insert into dest_dp1 partition (year) select first, word, year f
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp1
-{"version":"1.0","engine":"mr","hash":"b2d38401a3281e74a003d9650df97060","queryText":"insert into dest_dp1 partition (year) select first, word, year from src_dp","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b2d38401a3281e74a003d9650df97060","queryText":"insert into dest_dp1 partition (year) select first, word, year from src_dp","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
 PREHOOK: query: insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp2
-{"version":"1.0","engine":"mr","hash":"237302d8ffd62b5b71d9544b22de7770","queryText":"insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src_dp.month"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"237302d8ffd62b5b71d9544b22de7770","queryText":"insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src_dp.month"}]}
 PREHOOK: query: insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp2@y=0
-{"version":"1.0","engine":"mr","hash":"63e990b47e7ab4eb6f2ea09dfb7453ff","queryText":"insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[6],"targets":[0,1,2],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"63e990b47e7ab4eb6f2ea09dfb7453ff","queryText":"insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[6],"targets":[0,1,2],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
 PREHOOK: query: insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_dp
 PREHOOK: Output: default@dest_dp3@y=0
-{"version":"1.0","engine":"mr","hash":"6bf71a9d02c0612c63b6f40b15c1e8b3","queryText":"insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":7,"vertexType":"CO
 LUMN","vertexId":"default.src_dp.day"},{"id":8,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"6bf71a9d02c0612c63b6f40b15c1e8b3","queryText":"insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"i
 d":7,"vertexType":"COLUMN","vertexId":"default.src_dp.day"},{"id":8,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
 PREHOOK: query: drop table if exists src_dp1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table src_dp1 (f string, w string, m int)
@@ -385,4 +385,4 @@ PREHOOK: Output: default@dest_dp1@year=0
 PREHOOK: Output: default@dest_dp2
 PREHOOK: Output: default@dest_dp2@y=1
 PREHOOK: Output: default@dest_dp3@y=2
-{"version":"1.0","engine":"mr","hash":"44f16edbf35cfeaf3d4f7b0113a69b74","queryText":"from src_dp, src_dp1\ninsert into dest_dp1 partition (year) select first, word, year\ninsert into dest_dp2 partition (y, m) select first, word, year, month\ninsert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2\ninsert into dest_dp2 partition (y=1, m) select f, w, m\ninsert into dest_dp1 partition (year=0) select f, w","edges":[{"sources":[11],"targets":[0,1,2],"edgeType":"PROJECTION"},{"sources":[12],"targets":[3,4,5],"edgeType":"PROJECTION"},{"sources":[13],"targets":[6,7],"edgeType":"PROJECTION"},{"sources":[14],"targets":[8,9],"edgeType":"PROJECTION"},{"sources":[15],"targets":[1,0],"edgeType":"PROJECTION"},{"sources":[16],"targets":[4,3],"edgeType":"PROJECTION"},{"sources":[17],"targets":[8],"edgeType":"PROJECTION"},{"sources":[18],"targets":[10],"edgeType":"PROJECTION"},{"sources":[13],"targets":[2,5,9,10],"expression":"(src_dp.year = 2)","edgeType":"PREDI
 CATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":11,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":12,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":13,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":14,"vertexType":"COLUMN","vertexId":"default.src_dp.month
 "},{"id":15,"vertexType":"COLUMN","vertexId":"default.src_dp1.f"},{"id":16,"vertexType":"COLUMN","vertexId":"default.src_dp1.w"},{"id":17,"vertexType":"COLUMN","vertexId":"default.src_dp1.m"},{"id":18,"vertexType":"COLUMN","vertexId":"default.src_dp.day"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"44f16edbf35cfeaf3d4f7b0113a69b74","queryText":"from src_dp, src_dp1\ninsert into dest_dp1 partition (year) select first, word, year\ninsert into dest_dp2 partition (y, m) select first, word, year, month\ninsert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2\ninsert into dest_dp2 partition (y=1, m) select f, w, m\ninsert into dest_dp1 partition (year=0) select f, w","edges":[{"sources":[11],"targets":[0,1,2],"edgeType":"PROJECTION"},{"sources":[12],"targets":[3,4,5],"edgeType":"PROJECTION"},{"sources":[13],"targets":[6,7],"edgeType":"PROJECTION"},{"sources":[14],"targets":[8,9],"edgeType":"PROJECTION"},{"sources":[15],"targets":[1,0],"edgeType":"PROJECTION"},{"sources":[16],"targets":[4,3],"edgeType":"PROJECTION"},{"sources":[17],"targets":[8],"edgeType":"PROJECTION"},{"sources":[18],"targets":[10],"edgeType":"PROJECTION"},{"sources":[13],"targets":[2,5,9,10],"expression":"(src_dp.year = 
 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":11,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":12,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":13,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":14,"vertexType":"COLUMN","vertexId":
 "default.src_dp.month"},{"id":15,"vertexType":"COLUMN","vertexId":"default.src_dp1.f"},{"id":16,"vertexType":"COLUMN","vertexId":"default.src_dp1.w"},{"id":17,"vertexType":"COLUMN","vertexId":"default.src_dp1.m"},{"id":18,"vertexType":"COLUMN","vertexId":"default.src_dp.day"}]}


[29/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by jx...@apache.org.
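
The golden-file updates in this commit reflect the new rendering rule: a decimal result is no longer printed with trailing zeros stripped, but padded out to the declared scale of its column (hence 0.123456789 becoming 0.1234567890 below). A minimal sketch of that padding behavior in plain Java, using java.math.BigDecimal rather than Hive's own decimal classes — the class name and the decimal(20,10) scale are illustrative assumptions, not the actual patch:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class DecimalPadSketch {
    // Pad a decimal value with trailing zeros out to the column's scale.
    // RoundingMode.UNNECESSARY can only append zeros; it throws
    // ArithmeticException if the value already has more fractional
    // digits than the target scale, i.e. if real rounding were needed.
    static String toPaddedString(BigDecimal value, int columnScale) {
        return value.setScale(columnScale, RoundingMode.UNNECESSARY).toPlainString();
    }

    public static void main(String[] args) {
        // Assuming a decimal(20,10) column, as in the precision tests below:
        System.out.println(toPaddedString(new BigDecimal("0.123456789"), 10));
        // prints 0.1234567890
        System.out.println(toPaddedString(new BigDecimal("1234567890.123456789"), 10));
        // prints 1234567890.1234567890
    }
}
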
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index cf975d1..9b9fb71 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -110,14 +110,14 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	6	6984454.211097692	-4033.445769230769	6967702.8672438458471
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.746153846154	-11712.276923076923	12625.04759999997746
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	3	6984454.211097692	-617.5607769230769	6983219.0895438458462
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2	6984454.211097692	1833.9456923076925	6986288.1567899996925
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
 EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
@@ -208,11 +208,11 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.211097692	-4033.445769230769	6967702.8672438458471	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	-1941.6364148649	1426.0153418918999	2016.6902366556308	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459	-2325.50327307692295	1707.9424961538462	2415.395441814127
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.746153846154	-11712.276923076923	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.211097692	-617.5607769230769	6983219.0895438458462	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2916536.7443268917	2915005.5249214866	4122440.3477364695	2	6984454.211097692	1833.9456923076925	6986288.1567899996925	3493144.07839499984625	3491310.1327026924	4937458.140118758
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536	1956.576923076922966667	6821.495748565159	6822.606289190924
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	-1941.63641486490000	1426.0153418918999	2016.6902366556308	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590	-2325.503273076922950000	1707.9424961538462	2415.395441814127
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2916536.74432689170000	2915005.5249214866	4122440.3477364695	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250	3493144.078394999846250000	3491310.1327026924	4937458.140118758
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360	1956.576923076922966667	6821.495748565159	6822.606289190924

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
index 88c09d9..2d81305 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
@@ -46,13 +46,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--13326.0	528534767	true	1969-12-31 15:59:46.674	-13326	528534767	1	-13
--15813.0	528534767	true	1969-12-31 15:59:55.787	-15813	528534767	1	-4
--9566.0	528534767	true	1969-12-31 15:59:44.187	-9566	528534767	1	-16
-15007.0	528534767	true	1969-12-31 15:59:50.434	15007	528534767	1	-10
-7021.0	528534767	true	1969-12-31 16:00:15.007	7021	528534767	1	15
-4963.0	528534767	true	1969-12-31 16:00:07.021	4963	528534767	1	7
--7824.0	528534767	true	1969-12-31 16:00:04.963	-7824	528534767	1	5
--15431.0	528534767	true	1969-12-31 15:59:52.176	-15431	528534767	1	-8
--15549.0	528534767	true	1969-12-31 15:59:44.569	-15549	528534767	1	-15
-5780.0	528534767	true	1969-12-31 15:59:44.451	5780	528534767	1	-16
+-13326.0	528534767	true	1969-12-31 15:59:46.674	-13326.0000000000	528534767.00000000000000	1.00	-13
+-15813.0	528534767	true	1969-12-31 15:59:55.787	-15813.0000000000	528534767.00000000000000	1.00	-4
+-9566.0	528534767	true	1969-12-31 15:59:44.187	-9566.0000000000	528534767.00000000000000	1.00	-16
+15007.0	528534767	true	1969-12-31 15:59:50.434	15007.0000000000	528534767.00000000000000	1.00	-10
+7021.0	528534767	true	1969-12-31 16:00:15.007	7021.0000000000	528534767.00000000000000	1.00	15
+4963.0	528534767	true	1969-12-31 16:00:07.021	4963.0000000000	528534767.00000000000000	1.00	7
+-7824.0	528534767	true	1969-12-31 16:00:04.963	-7824.0000000000	528534767.00000000000000	1.00	5
+-15431.0	528534767	true	1969-12-31 15:59:52.176	-15431.0000000000	528534767.00000000000000	1.00	-8
+-15549.0	528534767	true	1969-12-31 15:59:44.569	-15549.0000000000	528534767.00000000000000	1.00	-15
+5780.0	528534767	true	1969-12-31 15:59:44.451	5780.0000000000	528534767.00000000000000	1.00	-16

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index 6369bc8..e57d6c1 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -77,13 +77,13 @@ LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
-1836.441995841977	-1166.027234927254	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
-1856.1322245322462	-1178.5293139292924	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
-1858.7575883576155	-1180.196257796231	0.837241711366943	251986.76756757565	5.772972973	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
-1862.6956340956693	-1182.6966735966386	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
-1883.6985446985233	-1196.0322245322466	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
-1886.3239085238924	-1197.6991683991848	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
-1887.636590436577	-1198.532640332654	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
-1895.5126819126846	-1203.5334719334692	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
-1909.9521829522155	-1212.701663201631	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
-1913.8902286902692	-1215.2020790020384	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459
+1836.44199584197700	-1166.02723492725400	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
+1856.13222453224620	-1178.52931392929240	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
+1858.75758835761550	-1180.19625779623100	0.837241711366943	251986.76756757565	5.7729729730	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
+1862.69563409566930	-1182.69667359663860	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
+1883.69854469852330	-1196.03222453224660	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
+1886.32390852389240	-1197.69916839918480	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
+1887.63659043657700	-1198.53264033265400	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
+1895.51268191268460	-1203.53347193346920	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
+1909.95218295221550	-1212.70166320163100	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
+1913.89022869026920	-1215.20207900203840	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
index cf48a32..d3356ed 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
@@ -155,109 +155,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
index f2aaf8d..c5ab8a7 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
@@ -99,13 +99,13 @@ NULL
 NULL
 NULL
 NULL
-0
-0
-0
-0
-0
-0.123456789
-0.123456789
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1234567890
+0.1234567890
 1.2345678901
 1.2345678901
 1.2345678901
@@ -122,14 +122,14 @@ NULL
 12345.6789012346
 123456.7890123456
 123456.7890123457
-1234567.890123456
+1234567.8901234560
 1234567.8901234568
-12345678.90123456
+12345678.9012345600
 12345678.9012345679
-123456789.0123456
+123456789.0123456000
 123456789.0123456789
-1234567890.123456
-1234567890.123456789
+1234567890.1234560000
+1234567890.1234567890
 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -182,13 +182,13 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0.123456789	1.123456789	-0.876543211
-0.123456789	1.123456789	-0.876543211
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.1234567890	1.1234567890	-0.8765432110
+0.1234567890	1.1234567890	-0.8765432110
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
@@ -205,14 +205,14 @@ NULL	NULL	NULL
 12345.6789012346	12346.6789012346	12344.6789012346
 123456.7890123456	123457.7890123456	123455.7890123456
 123456.7890123457	123457.7890123457	123455.7890123457
-1234567.890123456	1234568.890123456	1234566.890123456
+1234567.8901234560	1234568.8901234560	1234566.8901234560
 1234567.8901234568	1234568.8901234568	1234566.8901234568
-12345678.90123456	12345679.90123456	12345677.90123456
+12345678.9012345600	12345679.9012345600	12345677.9012345600
 12345678.9012345679	12345679.9012345679	12345677.9012345679
-123456789.0123456	123456790.0123456	123456788.0123456
+123456789.0123456000	123456790.0123456000	123456788.0123456000
 123456789.0123456789	123456790.0123456789	123456788.0123456789
-1234567890.123456	1234567891.123456	1234567889.123456
-1234567890.123456789	1234567891.123456789	1234567889.123456789
+1234567890.1234560000	1234567891.1234560000	1234567889.1234560000
+1234567890.1234567890	1234567891.1234567890	1234567889.1234567890
 PREHOOK: query: SELECT dec, dec * 2, dec / 3  FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -265,37 +265,37 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0.123456789	0.246913578	0.041152263
-0.123456789	0.246913578	0.041152263
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.1234567890	0.2469135780	0.041152263000
+0.1234567890	0.2469135780	0.041152263000
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
 12345.6789012346	24691.3578024692	4115.226300411533
 12345.6789012346	24691.3578024692	4115.226300411533
-123456.7890123456	246913.5780246912	41152.2630041152
+123456.7890123456	246913.5780246912	41152.263004115200
 123456.7890123457	246913.5780246914	41152.263004115233
-1234567.890123456	2469135.780246912	411522.630041152
+1234567.8901234560	2469135.7802469120	411522.630041152000
 1234567.8901234568	2469135.7802469136	411522.630041152267
-12345678.90123456	24691357.80246912	4115226.30041152
+12345678.9012345600	24691357.8024691200	4115226.300411520000
 12345678.9012345679	24691357.8024691358	4115226.300411522633
-123456789.0123456	246913578.0246912	41152263.0041152
-123456789.0123456789	246913578.0246913578	41152263.0041152263
-1234567890.123456	2469135780.246912	411522630.041152
-1234567890.123456789	2469135780.246913578	411522630.041152263
+123456789.0123456000	246913578.0246912000	41152263.004115200000
+123456789.0123456789	246913578.0246913578	41152263.004115226300
+1234567890.1234560000	2469135780.2469120000	411522630.041152000000
+1234567890.1234567890	2469135780.2469135780	411522630.041152263000
 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -348,13 +348,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.013717421
-0.123456789	0.013717421
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.1234567890	0.013717421000
+0.1234567890	0.013717421000
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
@@ -371,14 +371,14 @@ NULL	NULL
 12345.6789012346	1371.742100137178
 123456.7890123456	13717.421001371733
 123456.7890123457	13717.421001371744
-1234567.890123456	137174.210013717333
+1234567.8901234560	137174.210013717333
 1234567.8901234568	137174.210013717422
-12345678.90123456	1371742.100137173333
+12345678.9012345600	1371742.100137173333
 12345678.9012345679	1371742.100137174211
-123456789.0123456	13717421.001371733333
-123456789.0123456789	13717421.0013717421
-1234567890.123456	137174210.013717333333
-1234567890.123456789	137174210.013717421
+123456789.0123456000	13717421.001371733333
+123456789.0123456789	13717421.001371742100
+1234567890.1234560000	137174210.013717333333
+1234567890.1234567890	137174210.013717421000
 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -431,13 +431,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.0045724736667
-0.123456789	0.0045724736667
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.1234567890	0.0045724736667
+0.1234567890	0.0045724736667
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
@@ -454,14 +454,14 @@ NULL	NULL
 12345.6789012346	457.2473667123926
 123456.7890123456	4572.4736671239111
 123456.7890123457	4572.4736671239148
-1234567.890123456	45724.7366712391111
+1234567.8901234560	45724.7366712391111
 1234567.8901234568	45724.7366712391407
-12345678.90123456	457247.3667123911111
+12345678.9012345600	457247.3667123911111
 12345678.9012345679	457247.3667123914037
-123456789.0123456	4572473.6671239111111
+123456789.0123456000	4572473.6671239111111
 123456789.0123456789	4572473.6671239140333
-1234567890.123456	45724736.6712391111111
-1234567890.123456789	45724736.6712391403333
+1234567890.1234560000	45724736.6712391111111
+1234567890.1234567890	45724736.6712391403333
 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -514,13 +514,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.015241578750190521
-0.123456789	0.015241578750190521
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.1234567890	0.01524157875019052100
+0.1234567890	0.01524157875019052100
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
@@ -537,14 +537,14 @@ NULL	NULL
 12345.6789012346	152415787.53238916034140423716
 123456.7890123456	15241578753.23881726870921383936
 123456.7890123457	15241578753.23884196006701630849
-1234567.890123456	1524157875323.881726870921383936
+1234567.8901234560	1524157875323.88172687092138393600
 1234567.8901234568	1524157875323.88370217954558146624
-12345678.90123456	152415787532388.1726870921383936
+12345678.9012345600	152415787532388.17268709213839360000
 12345678.9012345679	152415787532388.36774881877789971041
-123456789.0123456	15241578753238817.26870921383936
+123456789.0123456000	15241578753238817.26870921383936000000
 123456789.0123456789	15241578753238836.75019051998750190521
-1234567890.123456	NULL
-1234567890.123456789	NULL
+1234567890.1234560000	NULL
+1234567890.1234567890	NULL
 PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
@@ -637,7 +637,7 @@ POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_precision
 #### A masked pattern was here ####
-12345678901234567890.12345678
+12345678901234567890.123456780000000000
 PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
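
The expected-output changes above all follow one pattern: decimal results are now printed with trailing zeros padded out to the declared scale of the result type (so 0 becomes 0.0000000000 for a 10-scale result, and 1234567.890123456 becomes 1234567.8901234560). A minimal sketch of that padding in plain Java, using java.math.BigDecimal rather than Hive's HiveDecimal; the helper name padToScale is invented here for illustration:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadDemo {
      // Pad a decimal string with trailing zeros up to 'scale'.
      // RoundingMode.UNNECESSARY never rounds: raising the scale only
      // appends zeros, and it throws if the value already has more
      // fractional digits than 'scale'.
      static String padToScale(String dec, int scale) {
        return new BigDecimal(dec).setScale(scale, RoundingMode.UNNECESSARY).toPlainString();
      }

      public static void main(String[] args) {
        System.out.println(padToScale("0", 10));                  // 0.0000000000
        System.out.println(padToScale("0.123456789", 10));        // 0.1234567890
        System.out.println(padToScale("1234567.890123456", 10));  // 1234567.8901234560
      }
    }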

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
index 0151b04..5291609 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
@@ -25,7 +25,7 @@ POSTHOOK: query: select * from decimal_tbl_1_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555
+55555.000000000000000000
 PREHOOK: query: -- EXPLAIN
 -- SELECT dec, round(null), round(null, 0), round(125, null), 
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
@@ -114,7 +114,7 @@ FROM decimal_tbl_1_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555	55555	55555	55555	55555	55560	55600	56000	60000	100000	0	0	0
+55555	55555	55555.0	55555.00	55555.000	55560	55600	56000	60000	100000	0	0	0
 PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -143,7 +143,7 @@ POSTHOOK: query: select * from decimal_tbl_2_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125.315	-125.315
+125.315000000000000000	-125.315000000000000000
 PREHOOK: query: EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
@@ -226,7 +226,7 @@ FROM decimal_tbl_2_orc ORDER BY p
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125	125	125.3	125.32	125.315	125.315	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.315	-130	-100	0	0
+125	125	125.3	125.32	125.315	125.3150	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.3150	-130	-100	0	0
 PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -254,7 +254,7 @@ POSTHOOK: query: select * from decimal_tbl_3_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-3.141592653589793
+3.141592653589793000
 PREHOOK: query: EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
@@ -381,7 +381,7 @@ FROM decimal_tbl_3_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.14159265359	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.141592653589793
+0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.141592653590	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.1415926535897930
 PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -410,7 +410,7 @@ POSTHOOK: query: select * from decimal_tbl_4_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_4_orc
 #### A masked pattern was here ####
-1809242.3151111344	-1809242.3151111344
+1809242.315111134400000000	-1809242.315111134400000000
 PREHOOK: query: EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
index ffdb1c9..7dea1a2 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
@@ -73,16 +73,16 @@ POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_trailing
 #### A masked pattern was here ####
-0	0	0
-1	0	0
+0	0.0000	0.00000000
+1	0.0000	0.00000000
 2	NULL	NULL
-3	1	1
-4	10	10
-5	100	100
-6	1000	1000
-7	10000	10000
-8	100000	100000
-9	NULL	1000000
+3	1.0000	1.00000000
+4	10.0000	10.00000000
+5	100.0000	100.00000000
+6	1000.0000	1000.00000000
+7	10000.0000	10000.00000000
+8	100000.0000	100000.00000000
+9	NULL	1000000.00000000
 10	NULL	NULL
 11	NULL	NULL
 12	NULL	NULL
@@ -91,18 +91,18 @@ POSTHOOK: Input: default@decimal_trailing
 15	NULL	NULL
 16	NULL	NULL
 17	NULL	NULL
-18	1	1
-19	10	10
-20	100	100
-21	1000	1000
-22	100000	10000
-23	0	0
-24	0	0
-25	0	0
-26	0	0
-27	0	0
-28	12313.2	134134.312525
-29	99999.999	134134.31242553
+18	1.0000	1.00000000
+19	10.0000	10.00000000
+20	100.0000	100.00000000
+21	1000.0000	1000.00000000
+22	100000.0000	10000.00000000
+23	0.0000	0.00000000
+24	0.0000	0.00000000
+25	0.0000	0.00000000
+26	0.0000	0.00000000
+27	0.0000	0.00000000
+28	12313.2000	134134.31252500
+29	99999.9990	134134.31242553
 PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_trailing_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
index cfd2a55..6837b76 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
@@ -92,44 +92,44 @@ POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-200
-20
-2
-0.2
-0.02
-400
-40
-4
-0
-0.4
-0.04
-0.6
-0.66
-0.666
--0.6
--0.66
--0.666
-2
-4
-6.28
--2.24
--2.24
--2.244
-2.24
-2.244
-248
-250.4
--2510.98
-6.28
-6.28
-6.28
-2
--2469135780.246913578
-2469135780.24691356
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.2000000000
+0.0200000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.4000000000
+0.0400000000
+0.6000000000
+0.6600000000
+0.6660000000
+-0.6000000000
+-0.6600000000
+-0.6660000000
+2.0000000000
+4.0000000000
+6.2800000000
+-2.2400000000
+-2.2400000000
+-2.2440000000
+2.2400000000
+2.2440000000
+248.0000000000
+250.4000000000
+-2510.9800000000
+6.2800000000
+6.2800000000
+6.2800000000
+2.0000000000
+-2469135780.2469135780
+2469135780.2469135600
 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
@@ -172,44 +172,44 @@ POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-200
-20
-2
-0.1
-0.01
-400
-40
-4
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-2
-4
-6.14
--2.12
--2.12
--12.122
-2.12
-2.122
-248
-250.2
--2510.49
-6.14
-6.14
-7.14
-2
--2469135780.123456789
-2469135780.12345678
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.1000000000
+0.0100000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+2.0000000000
+4.0000000000
+6.1400000000
+-2.1200000000
+-2.1200000000
+-12.1220000000
+2.1200000000
+2.1220000000
+248.0000000000
+250.2000000000
+-2510.4900000000
+6.1400000000
+6.1400000000
+7.1400000000
+2.0000000000
+-2469135780.1234567890
+2469135780.1234567800
 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
@@ -414,44 +414,44 @@ POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
@@ -494,44 +494,44 @@ POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-0
-0
-0
-0.1
-0.01
-0
-0
-0
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-0
-0
-0.14
--0.12
--0.12
-9.878
-0.12
-0.122
-0
-0.2
--0.49
-0.14
-0.14
--0.86
-0
--0.123456789
-0.12345678
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1000000000
+0.0100000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.0000000000
+0.0000000000
+0.1400000000
+-0.1200000000
+-0.1200000000
+9.8780000000
+0.1200000000
+0.1220000000
+0.0000000000
+0.2000000000
+-0.4900000000
+0.1400000000
+0.1400000000
+-0.8600000000
+0.0000000000
+-0.1234567890
+0.1234567800
 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
@@ -736,42 +736,42 @@ POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-19360000
+19360000.00000000000000000000
 NULL
-0
-0
-10000
-100
-1
-0.01
-0.0001
-40000
-400
-4
-0
-0.04
-0.0004
-0.09
-0.1089
-0.110889
-0.09
-0.1089
-0.110889
-1
-4
-9.8596
-1.2544
-1.2544
-1.258884
-1.2544
-1.258884
-15376
-15675.04
-1576255.1401
-9.8596
-9.8596
-9.8596
-1
+0.00000000000000000000
+0.00000000000000000000
+10000.00000000000000000000
+100.00000000000000000000
+1.00000000000000000000
+0.01000000000000000000
+0.00010000000000000000
+40000.00000000000000000000
+400.00000000000000000000
+4.00000000000000000000
+0.00000000000000000000
+0.04000000000000000000
+0.00040000000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+1.00000000000000000000
+4.00000000000000000000
+9.85960000000000000000
+1.25440000000000000000
+1.25440000000000000000
+1.25888400000000000000
+1.25440000000000000000
+1.25888400000000000000
+15376.00000000000000000000
+15675.04000000000000000000
+1576255.14010000000000000000
+9.85960000000000000000
+9.85960000000000000000
+9.85960000000000000000
+1.00000000000000000000
 NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
@@ -819,29 +819,29 @@ POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-100	100
-10	10
-1	1
-200	200
-20	20
-2	2
-1	1
-2	2
-3.14	3
--1.12	-1
--1.12	-1
--1.122	-11
-1.12	1
-1.122	1
-124	124
-125.2	125
--1255.49	-1255
-3.14	3
-3.14	3
-3.14	4
-1	1
--1234567890.123456789	-1234567890
-1234567890.12345678	1234567890
+100.0000000000	100
+10.0000000000	10
+1.0000000000	1
+200.0000000000	200
+20.0000000000	20
+2.0000000000	2
+1.0000000000	1
+2.0000000000	2
+3.1400000000	3
+-1.1200000000	-1
+-1.1200000000	-1
+-1.1220000000	-11
+1.1200000000	1
+1.1220000000	1
+124.0000000000	124
+125.2000000000	125
+-1255.4900000000	-1255
+3.1400000000	3
+3.1400000000	3
+3.1400000000	4
+1.0000000000	1
+-1234567890.1234567890	-1234567890
+1234567890.1234567800	1234567890
 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
@@ -884,44 +884,44 @@ POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--19360000
+-19360000.0000000000
 NULL
-0
-0
-10000
-100
-1
-0
-0
-40000
-400
-4
-0
-0
-0
-0
-0
-0
-0
-0
-0
-1
-4
-9.42
-1.12
-1.12
-12.342
-1.12
-1.122
-15376
-15650
-1575639.95
-9.42
-9.42
-12.56
-1
-1524157875171467887.50190521
-1524157875171467876.3907942
+0.0000000000
+0.0000000000
+10000.0000000000
+100.0000000000
+1.0000000000
+0.0000000000
+0.0000000000
+40000.0000000000
+400.0000000000
+4.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+1.0000000000
+4.0000000000
+9.4200000000
+1.1200000000
+1.1200000000
+12.3420000000
+1.1200000000
+1.1220000000
+15376.0000000000
+15650.0000000000
+1575639.9500000000
+9.4200000000
+9.4200000000
+12.5600000000
+1.0000000000
+1524157875171467887.5019052100
+1524157875171467876.3907942000
 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
@@ -1220,40 +1220,40 @@ POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
 PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
@@ -1299,30 +1299,30 @@ POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1
-1
-1
-1
-1
-1
-1
-1
-1
+-1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
 1.046666666666666666667
-1.12
-1.12
-0.102
-1.12
-1.122
-1
-1.0016
+1.120000000000000000000
+1.120000000000000000000
+0.102000000000000000000
+1.120000000000000000000
+1.122000000000000000000
+1.000000000000000000000
+1.001600000000000000000
 1.000390438247011952191
 1.046666666666666666667
 1.046666666666666666667
-0.785
-1
-1.0000000001
-1.00000000009999999271
+0.785000000000000000000
+1.000000000000000000000
+1.000000000100000000000
+1.000000000099999992710
 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
@@ -1516,44 +1516,44 @@ POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
-0.3
-0.33
-0.333
-1
-2
-3.14
-1.12
-1.12
-1.122
-1.12
-1.122
-124
-125.2
-1255.49
-3.14
-3.14
-3.14
-1
-1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- avg
 EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
@@ -1643,23 +1643,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789	-1234567890.123456789	-1234567890.123456789
--1255	-1255.49	-1255.49	-1255.49
--11	-1.122	-1.122	-1.122
--1	-1.12	-1.12	-2.24
-0	0.02538461538461538461538	0.02538461538462	0.33
-1	1.0484	1.0484	5.242
-2	2	2	4
-3	3.14	3.14	9.42
-4	3.14	3.14	3.14
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125	125.2	125.2	125.2
-200	200	200	200
-4400	-4400	-4400	-4400
-1234567890	1234567890.12345678	1234567890.12345678	1234567890.12345678
+-1234567890	-1234567890.12345678900000000000000	-1234567890.12345678900000	-1234567890.1234567890
+-1255	-1255.49000000000000000000000	-1255.49000000000000	-1255.4900000000
+-11	-1.12200000000000000000000	-1.12200000000000	-1.1220000000
+-1	-1.12000000000000000000000	-1.12000000000000	-2.2400000000
+0	0.02538461538461538461538	0.02538461538462	0.3300000000
+1	1.04840000000000000000000	1.04840000000000	5.2420000000
+2	2.00000000000000000000000	2.00000000000000	4.0000000000
+3	3.14000000000000000000000	3.14000000000000	9.4200000000
+4	3.14000000000000000000000	3.14000000000000	3.1400000000
+10	10.00000000000000000000000	10.00000000000000	10.0000000000
+20	20.00000000000000000000000	20.00000000000000	20.0000000000
+100	100.00000000000000000000000	100.00000000000000	100.0000000000
+124	124.00000000000000000000000	124.00000000000000	124.0000000000
+125	125.20000000000000000000000	125.20000000000000	125.2000000000
+200	200.00000000000000000000000	200.00000000000000	200.0000000000
+4400	-4400.00000000000000000000000	-4400.00000000000000	-4400.0000000000
+1234567890	1234567890.12345678000000000000000	1234567890.12345678000000	1234567890.1234567800
 PREHOOK: query: -- negative
 EXPLAIN SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1704,44 +1704,44 @@ POSTHOOK: query: SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
--100
--10
--1
--0.1
--0.01
--200
--20
--2
-0
--0.2
--0.02
--0.3
--0.33
--0.333
-0.3
-0.33
-0.333
--1
--2
--3.14
-1.12
-1.12
-1.122
--1.12
--1.122
--124
--125.2
-1255.49
--3.14
--3.14
--3.14
--1
-1234567890.123456789
--1234567890.12345678
+0.0000000000
+0.0000000000
+-100.0000000000
+-10.0000000000
+-1.0000000000
+-0.1000000000
+-0.0100000000
+-200.0000000000
+-20.0000000000
+-2.0000000000
+0.0000000000
+-0.2000000000
+-0.0200000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+-1.0000000000
+-2.0000000000
+-3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+-1.1200000000
+-1.1220000000
+-124.0000000000
+-125.2000000000
+1255.4900000000
+-3.1400000000
+-3.1400000000
+-3.1400000000
+-1.0000000000
+1234567890.1234567890
+-1234567890.1234567800
 PREHOOK: query: -- positive
 EXPLAIN SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1773,44 +1773,44 @@ POSTHOOK: query: SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-1
-2
-3.14
--1.12
--1.12
--1.122
-1.12
-1.122
-124
-125.2
--1255.49
-3.14
-3.14
-3.14
-1
--1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+-1.1200000000
+-1.1200000000
+-1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+-1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+-1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- ceiling
 EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2019,42 +2019,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.00
 NULL
-0
-0
-100
-10
-1
-0.1
+0.00
+0.00
+100.00
+10.00
+1.00
+0.10
 0.01
-200
-20
-2
-0
-0.2
+200.00
+20.00
+2.00
+0.00
+0.20
 0.02
-0.3
+0.30
 0.33
 0.33
--0.3
+-0.30
 -0.33
 -0.33
-1
-2
+1.00
+2.00
 3.14
 -1.12
 -1.12
 -1.12
 1.12
 1.12
-124
-125.2
+124.00
+125.20
 -1255.49
 3.14
 3.14
 3.14
-1
+1.00
 -1234567890.12
 1234567890.12
 PREHOOK: query: -- power
@@ -2182,44 +2182,44 @@ POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--2199
+-2199.000000000000
 NULL
 NULL
 NULL
-1
-1
-0
-0
-0
-1
-1
-0
+1.000000000000
+1.000000000000
+0.000000000000
+0.000000000000
+0.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
 NULL
-0
-0
-0.1
-0.01
-0.001
-0.1
-0.01
-0.001
-0
-0
-1
--0.12
--0.12
--0.122
-0.44
-0.439
-1
-1
--626.745
-1
-1
-1
-0
--617283944.0617283945
-1
+0.000000000000
+0.000000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.000000000000
+0.000000000000
+1.000000000000
+-0.120000000000
+-0.120000000000
+-0.122000000000
+0.440000000000
+0.439000000000
+1.000000000000
+1.000000000000
+-626.745000000000
+1.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
+-617283944.061728394500
+1.000000000000
 PREHOOK: query: -- stddev, var
 EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value
 PREHOOK: type: QUERY
@@ -2498,7 +2498,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890.123456789
+-1234567890.1234567890
 PREHOOK: query: -- max
 EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2558,7 +2558,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1234567890.12345678
+1234567890.1234567800
 PREHOOK: query: -- count
 EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index 77dc175..5352885 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -113,56 +113,56 @@ LIMIT 50
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
--1073051226	-7382.0	-4409.2486486486	-5280.969230769231	-4409.2486486486
--1072081801	8373.0	5001.1702702703	5989.915384615385	5001.1702702703
--1072076362	-5470.0	-3267.2162162162	-3913.1538461538466	-3267.2162162162
--1070883071	-741.0	-442.5972972973	-530.1	-442.5972972973
--1070551679	-947.0	-565.6405405405	-677.4692307692308	-565.6405405405
--1069512165	11417.0	6819.3432432432	8167.546153846154	6819.3432432432
--1069109166	8390.0	5011.3243243243	6002.076923076923	5011.3243243243
--1068623584	-14005.0	-8365.1486486486	-10018.961538461539	-8365.1486486486
--1067386090	-3977.0	-2375.4513513514	-2845.084615384616	-2375.4513513514
--1066922682	-9987.0	-5965.2081081081	-7144.546153846154	-5965.2081081081
--1066226047	-9439.0	-5637.8891891892	-6752.515384615385	-5637.8891891892
--1065117869	2538.0	1515.9405405405	1815.646153846154	1515.9405405405
--1064949302	6454.0	3854.9567567568	4617.092307692308	3854.9567567568
--1063498122	-11480.0	-6856.972972973	-8212.615384615387	-6856.972972973
--1062973443	10541.0	6296.1108108108	7540.869230769231	6296.1108108108
--1061614989	-4234.0	-2528.9567567568	-3028.938461538462	-2528.9567567568
--1061057428	-1085.0	-648.0675675676	-776.1923076923077	-648.0675675676
--1059941909	8782.0	5245.4648648649	6282.507692307693	5245.4648648649
--1059338191	7322.0	4373.4108108108	5238.046153846154	4373.4108108108
--1059047258	12452.0	7437.5459459459	8907.969230769231	7437.5459459459
--1056684111	13991.0	8356.7864864865	10008.946153846155	8356.7864864865
--1055945837	13690.0	8177	9793.615384615387	8177
--1055669248	2570.0	1535.0540540541	1838.538461538462	1535.0540540541
--1055316250	-14990.0	-8953.4864864865	-10723.615384615385	-8953.4864864865
--1053385587	14504.0	8663.2	10375.938461538462	8663.2
--1053238077	-3704.0	-2212.3891891892	-2649.784615384616	-2212.3891891892
--1052745800	-12404.0	-7408.8756756757	-8873.630769230771	-7408.8756756757
--1052322972	-7433.0	-4439.7108108108	-5317.453846153847	-4439.7108108108
--1050684541	-8261.0	-4934.272972973	-5909.792307692308	-4934.272972973
--1050657303	-6999.0	-4180.4837837838	-5006.976923076923	-4180.4837837838
--1050165799	8634.0	5157.0648648649	6176.63076923077	5157.0648648649
+-1073051226	-7382.0	-4409.2486486486	-5280.96923076923100	-4409.2486486486
+-1072081801	8373.0	5001.1702702703	5989.91538461538500	5001.1702702703
+-1072076362	-5470.0	-3267.2162162162	-3913.15384615384660	-3267.2162162162
+-1070883071	-741.0	-442.5972972973	-530.10000000000000	-442.5972972973
+-1070551679	-947.0	-565.6405405405	-677.46923076923080	-565.6405405405
+-1069512165	11417.0	6819.3432432432	8167.54615384615400	6819.3432432432
+-1069109166	8390.0	5011.3243243243	6002.07692307692300	5011.3243243243
+-1068623584	-14005.0	-8365.1486486486	-10018.96153846153900	-8365.1486486486
+-1067386090	-3977.0	-2375.4513513514	-2845.08461538461600	-2375.4513513514
+-1066922682	-9987.0	-5965.2081081081	-7144.54615384615400	-5965.2081081081
+-1066226047	-9439.0	-5637.8891891892	-6752.51538461538500	-5637.8891891892
+-1065117869	2538.0	1515.9405405405	1815.64615384615400	1515.9405405405
+-1064949302	6454.0	3854.9567567568	4617.09230769230800	3854.9567567568
+-1063498122	-11480.0	-6856.9729729730	-8212.61538461538700	-6856.9729729730
+-1062973443	10541.0	6296.1108108108	7540.86923076923100	6296.1108108108
+-1061614989	-4234.0	-2528.9567567568	-3028.93846153846200	-2528.9567567568
+-1061057428	-1085.0	-648.0675675676	-776.19230769230770	-648.0675675676
+-1059941909	8782.0	5245.4648648649	6282.50769230769300	5245.4648648649
+-1059338191	7322.0	4373.4108108108	5238.04615384615400	4373.4108108108
+-1059047258	12452.0	7437.5459459459	8907.96923076923100	7437.5459459459
+-1056684111	13991.0	8356.7864864865	10008.94615384615500	8356.7864864865
+-1055945837	13690.0	8177.0000000000	9793.61538461538700	8177.0000000000
+-1055669248	2570.0	1535.0540540541	1838.53846153846200	1535.0540540541
+-1055316250	-14990.0	-8953.4864864865	-10723.61538461538500	-8953.4864864865
+-1053385587	14504.0	8663.2000000000	10375.93846153846200	8663.2000000000
+-1053238077	-3704.0	-2212.3891891892	-2649.78461538461600	-2212.3891891892
+-1052745800	-12404.0	-7408.8756756757	-8873.63076923077100	-7408.8756756757
+-1052322972	-7433.0	-4439.7108108108	-5317.45384615384700	-4439.7108108108
+-1050684541	-8261.0	-4934.2729729730	-5909.79230769230800	-4934.2729729730
+-1050657303	-6999.0	-4180.4837837838	-5006.97692307692300	-4180.4837837838
+-1050165799	8634.0	5157.0648648649	6176.63076923077000	5157.0648648649
 -1048934049	-524.0	-312.9837837838	-374.86153846153854	-312.9837837838
--1046399794	4130.0	2466.8378378378	2954.5384615384614	2466.8378378378
--1045867222	-8034.0	-4798.6864864865	-5747.400000000001	-4798.6864864865
--1045196363	-5039.0	-3009.7810810811	-3604.823076923077	-3009.7810810811
--1045181724	-5706.0	-3408.1783783784	-4081.9846153846156	-3408.1783783784
--1045087657	-5865.0	-3503.1486486486	-4195.7307692307695	-3503.1486486486
--1044207190	5381.0	3214.0567567568	3849.4846153846156	3214.0567567568
--1044093617	-3422.0	-2043.9513513514	-2448.046153846154	-2043.9513513514
--1043573508	16216.0	9685.772972973	11600.676923076924	9685.772972973
--1043132597	12302.0	7347.9513513514	8800.66153846154	7347.9513513514
--1043082182	9180.0	5483.1891891892	6567.2307692307695	5483.1891891892
--1042805968	5133.0	3065.927027027	3672.0692307692307	3065.927027027
--1042712895	9296.0	5552.4756756757	6650.215384615385	5552.4756756757
--1042396242	9583.0	5723.9	6855.53076923077	5723.9
--1041734429	-836.0	-499.3405405405	-598.0615384615385	-499.3405405405
--1041391389	-12970.0	-7746.9459459459	-9278.538461538463	-7746.9459459459
--1041252354	756.0	451.5567567568	540.8307692307692	451.5567567568
--1039776293	13704.0	8185.3621621622	9803.630769230771	8185.3621621622
--1039762548	-3802.0	-2270.9243243243	-2719.8923076923083	-2270.9243243243
+-1046399794	4130.0	2466.8378378378	2954.53846153846140	2466.8378378378
+-1045867222	-8034.0	-4798.6864864865	-5747.40000000000100	-4798.6864864865
+-1045196363	-5039.0	-3009.7810810811	-3604.82307692307700	-3009.7810810811
+-1045181724	-5706.0	-3408.1783783784	-4081.98461538461560	-3408.1783783784
+-1045087657	-5865.0	-3503.1486486486	-4195.73076923076950	-3503.1486486486
+-1044207190	5381.0	3214.0567567568	3849.48461538461560	3214.0567567568
+-1044093617	-3422.0	-2043.9513513514	-2448.04615384615400	-2043.9513513514
+-1043573508	16216.0	9685.7729729730	11600.67692307692400	9685.7729729730
+-1043132597	12302.0	7347.9513513514	8800.66153846154000	7347.9513513514
+-1043082182	9180.0	5483.1891891892	6567.23076923076950	5483.1891891892
+-1042805968	5133.0	3065.9270270270	3672.06923076923070	3065.9270270270
+-1042712895	9296.0	5552.4756756757	6650.21538461538500	5552.4756756757
+-1042396242	9583.0	5723.9000000000	6855.53076923077000	5723.9000000000
+-1041734429	-836.0	-499.3405405405	-598.06153846153850	-499.3405405405
+-1041391389	-12970.0	-7746.9459459459	-9278.53846153846300	-7746.9459459459
+-1041252354	756.0	451.5567567568	540.83076923076920	451.5567567568
+-1039776293	13704.0	8185.3621621622	9803.63076923077100	8185.3621621622
+-1039762548	-3802.0	-2270.9243243243	-2719.89230769230830	-2270.9243243243
 PREHOOK: query: SELECT sum(hash(*))
   FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
         WHERE cdecimal1 is not null and cdecimal2 is not null

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/windowing_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_decimal.q.out b/ql/src/test/results/clientpositive/windowing_decimal.q.out
index 60563ba..b157a23 100644
--- a/ql/src/test/results/clientpositive/windowing_decimal.q.out
+++ b/ql/src/test/results/clientpositive/windowing_decimal.q.out
@@ -57,32 +57,32 @@ from part_dec
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@part_dec
 #### A masked pattern was here ####
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1414.42	1173.15	3760.72
-Manufacturer#1	1602.59	1173.15	5363.31
-Manufacturer#1	1632.66	1173.15	6995.97
-Manufacturer#1	1753.76	1173.15	8749.73
-Manufacturer#2	1690.68	1690.68	1690.68
-Manufacturer#2	1698.66	1690.68	3389.34
-Manufacturer#2	1701.6	1690.68	5090.94
-Manufacturer#2	1800.7	1690.68	6891.64
-Manufacturer#2	2031.98	1690.68	8923.62
-Manufacturer#3	1190.27	1190.27	1190.27
-Manufacturer#3	1337.29	1190.27	2527.56
-Manufacturer#3	1410.39	1190.27	3937.95
-Manufacturer#3	1671.68	1190.27	5609.63
-Manufacturer#3	1922.98	1190.27	7532.61
-Manufacturer#4	1206.26	1206.26	1206.26
-Manufacturer#4	1290.35	1206.26	2496.61
-Manufacturer#4	1375.42	1206.26	3872.03
-Manufacturer#4	1620.67	1206.26	5492.7
-Manufacturer#4	1844.92	1206.26	7337.62
-Manufacturer#5	1018.1	1018.1	1018.1
-Manufacturer#5	1464.48	1018.1	2482.58
-Manufacturer#5	1611.66	1018.1	4094.24
-Manufacturer#5	1788.73	1018.1	5882.97
-Manufacturer#5	1789.69	1018.1	7672.66
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1414.420	1173.150	3760.720
+Manufacturer#1	1602.590	1173.150	5363.310
+Manufacturer#1	1632.660	1173.150	6995.970
+Manufacturer#1	1753.760	1173.150	8749.730
+Manufacturer#2	1690.680	1690.680	1690.680
+Manufacturer#2	1698.660	1690.680	3389.340
+Manufacturer#2	1701.600	1690.680	5090.940
+Manufacturer#2	1800.700	1690.680	6891.640
+Manufacturer#2	2031.980	1690.680	8923.620
+Manufacturer#3	1190.270	1190.270	1190.270
+Manufacturer#3	1337.290	1190.270	2527.560
+Manufacturer#3	1410.390	1190.270	3937.950
+Manufacturer#3	1671.680	1190.270	5609.630
+Manufacturer#3	1922.980	1190.270	7532.610
+Manufacturer#4	1206.260	1206.260	1206.260
+Manufacturer#4	1290.350	1206.260	2496.610
+Manufacturer#4	1375.420	1206.260	3872.030
+Manufacturer#4	1620.670	1206.260	5492.700
+Manufacturer#4	1844.920	1206.260	7337.620
+Manufacturer#5	1018.100	1018.100	1018.100
+Manufacturer#5	1464.480	1018.100	2482.580
+Manufacturer#5	1611.660	1018.100	4094.240
+Manufacturer#5	1788.730	1018.100	5882.970
+Manufacturer#5	1789.690	1018.100	7672.660
 PREHOOK: query: select p_mfgr, p_retailprice, 
 first_value(p_retailprice) over(partition by p_mfgr order by p_retailprice range between 5 preceding and current row) ,
 sum(p_retailprice) over(partition by p_mfgr order by p_retailprice range between 5 preceding and current row)
@@ -97,29 +97,29 @@ from part_dec
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@part_dec
 #### A masked pattern was here ####
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1173.15	1173.15	2346.3
-Manufacturer#1	1414.42	1414.42	1414.42
-Manufacturer#1	1602.59	1602.59	1602.59
-Manufacturer#1	1632.66	1632.66	1632.66
-Manufacturer#1	1753.76	1753.76	1753.76
-Manufacturer#2	1690.68	1690.68	1690.68
-Manufacturer#2	1698.66	1698.66	1698.66
-Manufacturer#2	1701.6	1698.66	3400.26
-Manufacturer#2	1800.7	1800.7	1800.7
-Manufacturer#2	2031.98	2031.98	2031.98
-Manufacturer#3	1190.27	1190.27	1190.27
-Manufacturer#3	1337.29	1337.29	1337.29
-Manufacturer#3	1410.39	1410.39	1410.39
-Manufacturer#3	1671.68	1671.68	1671.68
-Manufacturer#3	1922.98	1922.98	1922.98
-Manufacturer#4	1206.26	1206.26	1206.26
-Manufacturer#4	1290.35	1290.35	1290.35
-Manufacturer#4	1375.42	1375.42	1375.42
-Manufacturer#4	1620.67	1620.67	1620.67
-Manufacturer#4	1844.92	1844.92	1844.92
-Manufacturer#5	1018.1	1018.1	1018.1
-Manufacturer#5	1464.48	1464.48	1464.48
-Manufacturer#5	1611.66	1611.66	1611.66
-Manufacturer#5	1788.73	1788.73	1788.73
-Manufacturer#5	1789.69	1788.73	3578.42
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1173.150	1173.150	2346.300
+Manufacturer#1	1414.420	1414.420	1414.420
+Manufacturer#1	1602.590	1602.590	1602.590
+Manufacturer#1	1632.660	1632.660	1632.660
+Manufacturer#1	1753.760	1753.760	1753.760
+Manufacturer#2	1690.680	1690.680	1690.680
+Manufacturer#2	1698.660	1698.660	1698.660
+Manufacturer#2	1701.600	1698.660	3400.260
+Manufacturer#2	1800.700	1800.700	1800.700
+Manufacturer#2	2031.980	2031.980	2031.980
+Manufacturer#3	1190.270	1190.270	1190.270
+Manufacturer#3	1337.290	1337.290	1337.290
+Manufacturer#3	1410.390	1410.390	1410.390
+Manufacturer#3	1671.680	1671.680	1671.680
+Manufacturer#3	1922.980	1922.980	1922.980
+Manufacturer#4	1206.260	1206.260	1206.260
+Manufacturer#4	1290.350	1290.350	1290.350
+Manufacturer#4	1375.420	1375.420	1375.420
+Manufacturer#4	1620.670	1620.670	1620.670
+Manufacturer#4	1844.920	1844.920	1844.920
+Manufacturer#5	1018.100	1018.100	1018.100
+Manufacturer#5	1464.480	1464.480	1464.480
+Manufacturer#5	1611.660	1611.660	1611.660
+Manufacturer#5	1788.730	1788.730	1788.730
+Manufacturer#5	1789.690	1788.730	3578.420

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/windowing_navfn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out
index ae7d95a..a79fccc 100644
--- a/ql/src/test/results/clientpositive/windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out
@@ -287,13 +287,13 @@ POSTHOOK: Input: default@over10k
 65536	98.42
 65536	0.93
 65536	83.48
-65536	75.7
+65536	75.70
 65536	88.04
 65536	94.09
 65536	33.45
 65536	44.41
 65536	22.15
-65536	20.5
+65536	20.50
 65536	58.86
 65536	30.91
 65536	74.47
@@ -310,9 +310,9 @@ POSTHOOK: Input: default@over10k
 65536	80.26
 65536	35.07
 65536	95.88
-65536	30.6
+65536	30.60
 65536	46.97
-65536	58.8
+65536	58.80
 65536	5.72
 65536	29.27
 65536	62.25
@@ -336,7 +336,7 @@ POSTHOOK: Input: default@over10k
 65537	35.86
 65537	47.75
 65537	1.12
-65537	52.9
+65537	52.90
 65537	53.92
 65537	43.45
 65537	7.52
@@ -350,20 +350,20 @@ POSTHOOK: Input: default@over10k
 65537	56.48
 65537	83.21
 65537	56.52
-65537	36.6
-65537	59.7
+65537	36.60
+65537	59.70
 65537	80.14
-65537	66.3
+65537	66.30
 65537	94.87
 65537	40.92
-65537	25.2
+65537	25.20
 65537	7.36
 65538	NULL
 65538	53.35
 65538	54.64
 65538	76.67
 65538	15.17
-65538	1.2
+65538	1.20
 65538	13.71
 65538	81.59
 65538	43.33

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/windowing_rank.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_rank.q.out b/ql/src/test/results/clientpositive/windowing_rank.q.out
index 6a74a8e..67975f3 100644
--- a/ql/src/test/results/clientpositive/windowing_rank.q.out
+++ b/ql/src/test/results/clientpositive/windowing_rank.q.out
@@ -508,16 +508,16 @@ where rnk =  1 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over10k
 #### A masked pattern was here ####
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
-2013-03-01 09:11:58.70307	0.5	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
+2013-03-01 09:11:58.70307	0.50	1
 PREHOOK: query: select ts, dec, rnk
 from
   (select ts, dec,
@@ -546,16 +546,16 @@ where dec = 89.5 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over10k
 #### A masked pattern was here ####
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
-2013-03-01 09:11:58.703124	89.5	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
+2013-03-01 09:11:58.703124	89.50	1
 PREHOOK: query: select ts, dec, rnk
 from
   (select ts, dec,
@@ -586,13 +586,13 @@ where rnk = 1 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over10k
 #### A masked pattern was here ####
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
-2013-03-01 09:11:58.70307	37.3	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1
+2013-03-01 09:11:58.70307	37.30	1

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_windowspec3.q.out b/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
index aeb5adc..e311cf9 100644
--- a/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
+++ b/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
@@ -215,18 +215,18 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emp
 #### A masked pattern was here ####
 10	7839	NULL	5000.0	5000.0	5000.0	NULL	NULL	1687.5	5000.0
-10	7782	50	2450.0	2450.0	1687.5	NULL	1500.0	NULL	2350.0
-10	7934	100	1300.0	1875.0	1687.5	NULL	NULL	NULL	2350.0
-10	7987	150.5	1500.0	1750.0	1687.5	NULL	NULL	NULL	2350.0
-10	7988	200	1500.0	1687.5	1687.5	2450.0	NULL	NULL	2350.0
+10	7782	50.00	2450.0	2450.0	1687.5	NULL	1500.0	NULL	2350.0
+10	7934	100.00	1300.0	1875.0	1687.5	NULL	NULL	NULL	2350.0
+10	7987	150.50	1500.0	1750.0	1687.5	NULL	NULL	NULL	2350.0
+10	7988	200.00	1500.0	1687.5	1687.5	2450.0	NULL	NULL	2350.0
 20	7788	NULL	3000.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
 20	7902	NULL	3000.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
 20	7876	NULL	1100.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
 20	7369	NULL	800.0	1975.0	1975.0	NULL	NULL	2975.0	1975.0
-20	7566	100	2975.0	2975.0	2975.0	NULL	NULL	NULL	2175.0
+20	7566	100.00	2975.0	2975.0	2975.0	NULL	NULL	NULL	2175.0
 30	7900	NULL	950.0	1900.0	1900.0	NULL	NULL	1400.0	1900.0
 30	7698	NULL	2850.0	1900.0	1900.0	NULL	NULL	1400.0	1900.0
-30	7499	200.5	1600.0	1600.0	1450.0	NULL	NULL	1250.0	1630.0
-30	7844	300	1500.0	1550.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
-30	7521	300.5	1250.0	1450.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
-30	7654	500	1250.0	1333.3333333333333	1333.3333333333333	1375.0	NULL	NULL	1566.6666666666667
+30	7499	200.50	1600.0	1600.0	1450.0	NULL	NULL	1250.0	1630.0
+30	7844	300.00	1500.0	1550.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
+30	7521	300.50	1250.0	1450.0	1400.0	NULL	1250.0	NULL	1566.6666666666667
+30	7654	500.00	1250.0	1333.3333333333333	1333.3333333333333	1375.0	NULL	NULL	1566.6666666666667

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
index 709e53f..9ea6e91 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java
@@ -366,7 +366,7 @@ public final class BinarySortableSerializeWrite implements SerializeWrite {
    * DECIMAL.
    */
   @Override
-  public void writeHiveDecimal(HiveDecimal dec) throws IOException {
+  public void writeHiveDecimal(HiveDecimal dec, int scale) throws IOException {
     final boolean invert = columnSortOrderIsDesc[++index];
 
     // This field is not a null.

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
index e6fb8b6..21daa8b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/fast/SerializeWrite.java
@@ -151,5 +151,5 @@ public interface SerializeWrite {
   /*
    * DECIMAL.
    */
-  void writeHiveDecimal(HiveDecimal dec) throws IOException;
+  void writeHiveDecimal(HiveDecimal dec, int scale) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
index 40601c0..4e82e9b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveDecimal.java
@@ -102,12 +102,12 @@ public class LazyHiveDecimal extends LazyPrimitive<LazyHiveDecimalObjectInspecto
    * @param hiveDecimal
    * @throws IOException
    */
-  public static void writeUTF8(OutputStream outputStream, HiveDecimal hiveDecimal)
+  public static void writeUTF8(OutputStream outputStream, HiveDecimal hiveDecimal, int scale)
     throws IOException {
     if (hiveDecimal == null) {
       outputStream.write(nullBytes);
     } else {
-      ByteBuffer b = Text.encode(hiveDecimal.toString());
+      ByteBuffer b = Text.encode(hiveDecimal.toFormatString(scale));
       outputStream.write(b.array(), 0, b.limit());
     }
   }
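
The writeUTF8 change above is where the padded form reaches the serialized output: toString() drops trailing zeros, while toFormatString(scale) keeps the value at the declared scale. A hedged sketch of what such a formatter plausibly does (this is not Hive's implementation; the append-zeros-only behavior is an assumption based on the .q.out diffs earlier in this patch):

    import java.math.BigDecimal;

    public class FormatStringSketch {
      // Assumed behavior: render 'value' with at least 'scale' fractional
      // digits, appending zeros but never rounding existing digits away.
      static String toFormatString(BigDecimal value, int scale) {
        if (value.scale() < scale) {
          value = value.setScale(scale);  // scale increase appends zeros only
        }
        return value.toPlainString();
      }
    }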

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
index d6b2219..29d6ad8 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
@@ -304,8 +304,9 @@ public final class LazyUtils {
       break;
     }
     case DECIMAL: {
+      HiveDecimalObjectInspector decimalOI = (HiveDecimalObjectInspector) oi;
       LazyHiveDecimal.writeUTF8(out,
-        ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o));
+        decimalOI.getPrimitiveJavaObject(o), decimalOI.scale());
       break;
     }
     default: {

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
index 986d246..b64a803 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.serde2.lazy.fast;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.charset.CharacterCodingException;
 import java.sql.Date;
 import java.sql.Timestamp;
 
@@ -34,7 +33,6 @@ import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable;
 import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
@@ -47,13 +45,6 @@ import org.apache.hadoop.hive.serde2.lazy.LazyLong;
 import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
 import org.apache.hadoop.hive.serde2.lazy.LazyTimestamp;
 import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
-import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyObjectInspectorParameters;
-import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
 import org.apache.hadoop.hive.serde2.fast.SerializeWrite;
 import org.apache.hadoop.io.Text;
 import org.apache.hive.common.util.DateUtils;
@@ -506,13 +497,12 @@ public final class LazySimpleSerializeWrite implements SerializeWrite {
    * DECIMAL.
    */
   @Override
-  public void writeHiveDecimal(HiveDecimal v) throws IOException {
-
+  public void writeHiveDecimal(HiveDecimal v, int scale) throws IOException {
     if (index > 0) {
       output.write(separator);
     }
 
-    LazyHiveDecimal.writeUTF8(output, v);
+    LazyHiveDecimal.writeUTF8(output, v, scale);
 
     index++;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
index ebe4181..8f81df6 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/fast/LazyBinarySerializeWrite.java
@@ -713,7 +713,7 @@ public class LazyBinarySerializeWrite implements SerializeWrite {
    * DECIMAL.
    */
   @Override
-  public void writeHiveDecimal(HiveDecimal v) throws IOException {
+  public void writeHiveDecimal(HiveDecimal v, int scale) throws IOException {
 
     // Every 8 fields we write a NULL byte.
     if ((fieldIndex % 8) == 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
index fa46c9e..fc845a5 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/VerifyFast.java
@@ -259,12 +259,12 @@ public class VerifyFast {
     }
   }
 
-  public static void serializeWrite(SerializeWrite serializeWrite, PrimitiveCategory primitiveCategory, Object object) throws IOException {
+  public static void serializeWrite(SerializeWrite serializeWrite, PrimitiveTypeInfo primitiveTypeInfo, Object object) throws IOException {
     if (object == null) {
       serializeWrite.writeNull();
       return;
     }
-    switch (primitiveCategory) {
+    switch (primitiveTypeInfo.getPrimitiveCategory()) {
       case BOOLEAN:
       {
         boolean value = (Boolean) object;
@@ -330,7 +330,8 @@ public class VerifyFast {
     case DECIMAL:
       {
         HiveDecimal value = (HiveDecimal) object;
-        serializeWrite.writeHiveDecimal(value);
+        DecimalTypeInfo decTypeInfo = (DecimalTypeInfo)primitiveTypeInfo;
+        serializeWrite.writeHiveDecimal(value, decTypeInfo.scale());
       }
       break;
     case DATE:
@@ -365,7 +366,7 @@ public class VerifyFast {
       }
       break;
     default:
-      throw new Error("Unknown primitive category " + primitiveCategory.name());
+      throw new Error("Unknown primitive category " + primitiveTypeInfo.getPrimitiveCategory().name());
     }
   }
 }
\ No newline at end of file
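
The switch from PrimitiveCategory to PrimitiveTypeInfo matters because only the type info carries the declared scale. A minimal sketch, assuming the standard serde type factory; the class name DecimalScaleSketch is illustrative:

import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class DecimalScaleSketch {
  public static void main(String[] args) {
    // decimal(10,2): precision 10, scale 2, as declared in a table schema.
    DecimalTypeInfo typeInfo = TypeInfoFactory.getDecimalTypeInfo(10, 2);
    // The declared scale travels with the type info, which is why
    // serializeWrite now takes a PrimitiveTypeInfo instead of a category.
    System.out.println(typeInfo.precision()); // 10
    System.out.println(typeInfo.scale());     // 2
  }
}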

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
index 4438bdc..ae476ae 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java
@@ -62,8 +62,7 @@ public class TestBinarySortableFast extends TestCase {
       int[] perFieldWriteLengths = new int[MyTestPrimitiveClass.primitiveCount];
       for (int index = 0; index < MyTestPrimitiveClass.primitiveCount; index++) {
         Object object = t.getPrimitiveObject(index);
-        PrimitiveCategory primitiveCategory = t.getPrimitiveCategory(index);
-        VerifyFast.serializeWrite(binarySortableSerializeWrite, primitiveCategory, object);
+        VerifyFast.serializeWrite(binarySortableSerializeWrite, primitiveTypeInfoMap.get(t)[index], object);
         perFieldWriteLengths[index] = output.getLength();
       }
       perFieldWriteLengthsArray[i] = perFieldWriteLengths;

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
index 951d91a..7ebe7ae 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleFast.java
@@ -61,8 +61,7 @@ public class TestLazySimpleFast extends TestCase {
 
       for (int index = 0; index < MyTestPrimitiveClass.primitiveCount; index++) {
         Object object = t.getPrimitiveObject(index);
-        PrimitiveCategory primitiveCategory = t.getPrimitiveCategory(index);
-        VerifyFast.serializeWrite(lazySimpleSerializeWrite, primitiveCategory, object);
+        VerifyFast.serializeWrite(lazySimpleSerializeWrite, primitiveTypeInfosArray[i][index], object);
       }
 
       BytesWritable bytesWritable = new BytesWritable();

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java b/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
index a169586..4032743 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinaryFast.java
@@ -60,8 +60,7 @@ public class TestLazyBinaryFast extends TestCase {
 
       for (int index = 0; index < MyTestPrimitiveClass.primitiveCount; index++) {
         Object object = t.getPrimitiveObject(index);
-        PrimitiveCategory primitiveCategory = t.getPrimitiveCategory(index);
-        VerifyFast.serializeWrite(lazyBinarySerializeWrite, primitiveCategory, object);
+        VerifyFast.serializeWrite(lazyBinarySerializeWrite, primitiveTypeInfosArray[i][index], object);
       }
 
       BytesWritable bytesWritable = new BytesWritable();

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
index 4ed17a2..1c6be91 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
@@ -102,6 +102,17 @@ public class HiveDecimal implements Comparable<HiveDecimal> {
   public String toString() {
      return bd.toPlainString();
   }
+  
+  /**
+   * Return a string representation of the number with exactly as many decimal
+   * digits as the given scale. Note that this is different from toString().
+   * @param scale the number of digits after the decimal point
+   * @return the string representation with exactly the given number of decimal digits
+   */
+  public String toFormatString(int scale) {
+    return (bd.scale() == scale ? bd :
+      bd.setScale(scale, RoundingMode.HALF_UP)).toPlainString();
+  }
 
   public HiveDecimal setScale(int i) {
     return new HiveDecimal(bd.setScale(i, RoundingMode.HALF_UP));
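
The rule itself needs nothing beyond java.math.BigDecimal. A standalone sketch of the behavior the new method implements; class and method names here are illustrative, not part of Hive:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class ToFormatStringSketch {
  // Pad or round so the output always carries exactly 'scale' digits
  // after the decimal point, mirroring HiveDecimal.toFormatString.
  static String toFormatString(BigDecimal bd, int scale) {
    return (bd.scale() == scale ? bd
        : bd.setScale(scale, RoundingMode.HALF_UP)).toPlainString();
  }

  public static void main(String[] args) {
    System.out.println(toFormatString(new BigDecimal("3.1"), 3));      // 3.100 (padded)
    System.out.println(toFormatString(new BigDecimal("3.14159"), 2));  // 3.14 (rounded half-up)
    System.out.println(new BigDecimal("3.1").toPlainString());         // 3.1 (natural scale)
  }
}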

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
index a7d31fa..fe8ad85 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
@@ -17,10 +17,8 @@
  */
 
 package org.apache.hadoop.hive.ql.exec.vector;
-import java.io.IOException;
 import java.math.BigInteger;
 
-
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 


[50/55] [abbrv] hive git commit: HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default (Ashutosh Chauhan via Jason Dere)

Posted by jx...@apache.org.
HIVE-12320 : hive.metastore.disallow.incompatible.col.type.changes should be true by default (Ashutosh Chauhan via Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e542f7f3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e542f7f3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e542f7f3

Branch: refs/heads/master-fixed
Commit: e542f7f3cb103b7d33914d8b7510fbb294d8369c
Parents: e1b3b3f
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Nov 5 15:55:39 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Nov 5 15:55:39 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +-
 .../hive/hcatalog/cli/TestSemanticAnalysis.java |   1 +
 .../hive/hcatalog/api/TestHCatClient.java       |   2 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |  13 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   | 118 ++-----------------
 .../hive/ql/parse/TypeCheckProcFactory.java     |   5 +-
 .../hive/ql/exec/TestFunctionRegistry.java      |   2 +-
 .../disallow_incompatible_type_change_on1.q     |   6 +-
 ql/src/test/queries/clientpositive/alter1.q     |   6 +-
 .../queries/clientpositive/avro_partitioned.q   |   3 +-
 .../columnarserde_create_shortcut.q             |   2 +
 ql/src/test/queries/clientpositive/input3.q     |  10 +-
 ql/src/test/queries/clientpositive/lineage3.q   |   3 +-
 .../clientpositive/orc_int_type_promotion.q     |   2 +
 .../clientpositive/parquet_schema_evolution.q   |   6 +-
 .../partition_wise_fileformat11.q               |   4 +-
 .../partition_wise_fileformat12.q               |   4 +-
 .../partition_wise_fileformat13.q               |   5 +-
 .../partition_wise_fileformat15.q               |   4 +-
 .../partition_wise_fileformat16.q               |   4 +-
 .../test/queries/clientpositive/rename_column.q |   4 +-
 .../disallow_incompatible_type_change_on1.q.out |   3 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |  95 ++++++++++++++-
 23 files changed, 154 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3ab73ad..98f9206 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -637,7 +637,7 @@ public class HiveConf extends Configuration {
         "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
         "pruning is the correct behaviour"),
     METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
-        "hive.metastore.disallow.incompatible.col.type.changes", false,
+        "hive.metastore.disallow.incompatible.col.type.changes", true,
         "If true (default is false), ALTER TABLE operations which change the type of a\n" +
         "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
         "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
index 606cb3a..cf15ff2 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
@@ -68,6 +68,7 @@ public class TestSemanticAnalysis extends HCatBaseTest {
           "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe");
       hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
           HCatSemanticAnalyzer.class.getName());
+      hcatConf.setBoolVar(HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false);
       hcatDriver = new Driver(hcatConf);
       SessionState.start(new CliSessionState(hcatConf));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index 891322a..aa9c7d3 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -565,7 +565,7 @@ public class TestHCatClient {
       client.createTable(HCatCreateTableDesc.create(dbName, tableName, oldSchema).build());
 
       List<HCatFieldSchema> newSchema = Arrays.asList(new HCatFieldSchema("completely", Type.DOUBLE, ""),
-          new HCatFieldSchema("new", Type.FLOAT, ""),
+          new HCatFieldSchema("new", Type.STRING, ""),
           new HCatFieldSchema("fields", Type.STRING, ""));
 
       client.updateTableSchema(dbName, tableName, newSchema);

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index bbaa1ce..02cbd76 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -51,11 +51,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -79,6 +77,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
 import org.apache.hive.common.util.ReflectionUtil;
@@ -632,9 +631,6 @@ public class MetaStoreUtils {
    * Two types are compatible if we have internal functions to cast one to another.
    */
   static private boolean areColTypesCompatible(String oldType, String newType) {
-    if (oldType.equals(newType)) {
-      return true;
-    }
 
     /*
      * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
@@ -645,12 +641,9 @@ public class MetaStoreUtils {
      * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
      * not blocked.
      */
-    if(serdeConstants.PrimitiveTypes.contains(oldType.toLowerCase()) &&
-        serdeConstants.PrimitiveTypes.contains(newType.toLowerCase())) {
-      return true;
-    }
 
-    return false;
+    return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
+      TypeInfoUtils.getTypeInfoFromTypeString(newType));
   }
 
   public static final int MAX_MS_TYPENAME_LENGTH = 2000; // 4000/2, for an unlikely unicode case
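
Under the rewritten check, compatibility follows Hive's implicit-cast rules rather than a blanket primitive-to-primitive allowance. A hedged sketch of the resulting behavior, assuming this patch is applied; the wrapper class is illustrative:

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class ColTypeCompatibilitySketch {
  // Approximates the rewritten areColTypesCompatible: a column type change
  // is allowed only if Hive can implicitly cast old -> new.
  static boolean compatible(String oldType, String newType) {
    TypeInfo from = TypeInfoUtils.getTypeInfoFromTypeString(oldType);
    TypeInfo to = TypeInfoUtils.getTypeInfoFromTypeString(newType);
    return TypeInfoUtils.implicitConvertible(from, to);
  }

  public static void main(String[] args) {
    System.out.println(compatible("int", "string"));                // true: numeric -> string group
    System.out.println(compatible("string", "int"));                // false: would narrow
    System.out.println(compatible("map<string,string>", "string")); // false: complex -> primitive
  }
}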

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 2196ca9..5353062 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -22,7 +22,6 @@ import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.EnumMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
@@ -558,30 +557,6 @@ public final class FunctionRegistry {
     return synonyms;
   }
 
-  // The ordering of types here is used to determine which numeric types
-  // are common/convertible to one another. Probably better to rely on the
-  // ordering explicitly defined here than to assume that the enum values
-  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
-  static EnumMap<PrimitiveCategory, Integer> numericTypes =
-      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
-  static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
-
-  static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
-    numericTypeList.add(primitiveCategory);
-    numericTypes.put(primitiveCategory, level);
-  }
-
-  static {
-    registerNumericType(PrimitiveCategory.BYTE, 1);
-    registerNumericType(PrimitiveCategory.SHORT, 2);
-    registerNumericType(PrimitiveCategory.INT, 3);
-    registerNumericType(PrimitiveCategory.LONG, 4);
-    registerNumericType(PrimitiveCategory.FLOAT, 5);
-    registerNumericType(PrimitiveCategory.DOUBLE, 6);
-    registerNumericType(PrimitiveCategory.DECIMAL, 7);
-    registerNumericType(PrimitiveCategory.STRING, 8);
-  }
-
   /**
    * Check if the given type is numeric. String is considered numeric when used in
    * numeric operators.
@@ -702,15 +677,15 @@ public final class FunctionRegistry {
           (PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b,PrimitiveCategory.STRING);
     }
 
-    if (FunctionRegistry.implicitConvertible(a, b)) {
+    if (TypeInfoUtils.implicitConvertible(a, b)) {
       return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcB);
     }
-    if (FunctionRegistry.implicitConvertible(b, a)) {
+    if (TypeInfoUtils.implicitConvertible(b, a)) {
       return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, pcA);
     }
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
         return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -759,9 +734,9 @@ public final class FunctionRegistry {
       return TypeInfoFactory.doubleTypeInfo;
     }
 
-    for (PrimitiveCategory t : numericTypeList) {
-      if (FunctionRegistry.implicitConvertible(pcA, t)
-          && FunctionRegistry.implicitConvertible(pcB, t)) {
+    for (PrimitiveCategory t : TypeInfoUtils.numericTypeList) {
+      if (TypeInfoUtils.implicitConvertible(pcA, t)
+          && TypeInfoUtils.implicitConvertible(pcB, t)) {
         return getTypeInfoForPrimitiveCategory((PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, t);
       }
     }
@@ -790,8 +765,8 @@ public final class FunctionRegistry {
     if (pgB == PrimitiveGrouping.DATE_GROUP && pgA == PrimitiveGrouping.STRING_GROUP) {
       return PrimitiveCategory.STRING;
     }
-    Integer ai = numericTypes.get(pcA);
-    Integer bi = numericTypes.get(pcB);
+    Integer ai = TypeInfoUtils.numericTypes.get(pcA);
+    Integer bi = TypeInfoUtils.numericTypes.get(pcB);
     if (ai == null || bi == null) {
       // If either is not a numeric type, return null.
       return null;
@@ -870,73 +845,6 @@ public final class FunctionRegistry {
     return TypeInfoFactory.getStructTypeInfo(names, typeInfos);
   }
 
-  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
-    if (from == to) {
-      return true;
-    }
-
-    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
-    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
-
-    // Allow implicit String to Double conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
-      return true;
-    }
-    // Allow implicit String to Decimal conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
-      return true;
-    }
-    // Void can be converted to any type
-    if (from == PrimitiveCategory.VOID) {
-      return true;
-    }
-
-    // Allow implicit String to Date conversion
-    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit Numeric to String conversion
-    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-    // Allow implicit String to varchar conversion, and vice versa
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
-      return true;
-    }
-
-    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
-    // Decimal -> String
-    Integer f = numericTypes.get(from);
-    Integer t = numericTypes.get(to);
-    if (f == null || t == null) {
-      return false;
-    }
-    if (f.intValue() > t.intValue()) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns whether it is possible to implicitly convert an object of Class
-   * from to Class to.
-   */
-  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
-    if (from.equals(to)) {
-      return true;
-    }
-
-    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
-    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
-    // seen as equivalent.
-    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
-      return implicitConvertible(
-          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
-          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
-    }
-    return false;
-  }
-
   /**
    * Get the GenericUDAF evaluator for the name and argumentClasses.
    *
@@ -1105,7 +1013,7 @@ public final class FunctionRegistry {
       // but there is a conversion cost.
       return 1;
     }
-    if (!exact && implicitConvertible(argumentPassed, argumentAccepted)) {
+    if (!exact && TypeInfoUtils.implicitConvertible(argumentPassed, argumentAccepted)) {
       return 1;
     }
 
@@ -1273,9 +1181,9 @@ public final class FunctionRegistry {
             acceptedIsPrimitive = true;
             acceptedPrimCat = ((PrimitiveTypeInfo) accepted).getPrimitiveCategory();
           }
-          if (acceptedIsPrimitive && numericTypes.containsKey(acceptedPrimCat)) {
+          if (acceptedIsPrimitive && TypeInfoUtils.numericTypes.containsKey(acceptedPrimCat)) {
             // We're looking for the udf with the smallest maximum numeric type.
-            int typeValue = numericTypes.get(acceptedPrimCat);
+            int typeValue = TypeInfoUtils.numericTypes.get(acceptedPrimCat);
             maxNumericType = typeValue > maxNumericType ? typeValue : maxNumericType;
           } else if (!accepted.equals(reference)) {
             // There are non-numeric arguments that don't match from one UDF to

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 3a6535b..7f5d72a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hive.common.util.DateUtils;
@@ -903,7 +904,7 @@ public class TypeCheckProcFactory {
 
         if (myt.getCategory() == Category.LIST) {
           // Only allow integer index for now
-          if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+          if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
               TypeInfoFactory.intTypeInfo)) {
             throw new SemanticException(SemanticAnalyzer.generateErrorMessage(
                   expr, ErrorMsg.INVALID_ARRAYINDEX_TYPE.getMsg()));
@@ -913,7 +914,7 @@ public class TypeCheckProcFactory {
           TypeInfo t = ((ListTypeInfo) myt).getListElementTypeInfo();
           desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry.getGenericUDFForIndex(), children);
         } else if (myt.getCategory() == Category.MAP) {
-          if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(),
+          if (!TypeInfoUtils.implicitConvertible(children.get(1).getTypeInfo(),
               ((MapTypeInfo) myt).getMapKeyTypeInfo())) {
             throw new SemanticException(ErrorMsg.INVALID_MAPINDEX_TYPE
                 .getMsg(expr));

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 068bdee..6a83c32 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -80,7 +80,7 @@ public class TestFunctionRegistry extends TestCase {
   }
 
   private void implicit(TypeInfo a, TypeInfo b, boolean convertible) {
-    assertEquals(convertible, FunctionRegistry.implicitConvertible(a, b));
+    assertEquals(convertible, TypeInfoUtils.implicitConvertible(a, b));
   }
 
   public void testImplicitConversion() {

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q b/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
index d0d748c..cec9a0d 100644
--- a/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
+++ b/ql/src/test/queries/clientnegative/disallow_incompatible_type_change_on1.q
@@ -1,4 +1,4 @@
-SET hive.metastore.disallow.incompatible.col.type.changes=true;
+SET hive.metastore.disallow.incompatible.col.type.changes=false;
 SELECT * FROM src LIMIT 1;
 CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
 INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1;
@@ -11,7 +11,11 @@ ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 REPLACE COLUMNS (a BOOLEAN, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 REPLACE COLUMNS (a TINYINT, b MAP<STRING, STRING>);
 ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN;
+
+SET hive.metastore.disallow.incompatible.col.type.changes=true;
 -- All the above ALTERs will succeed since they are between compatible types.
 -- The following ALTER will fail as MAP<STRING, STRING> and STRING are not
 -- compatible.
+
 ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING);
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/alter1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter1.q b/ql/src/test/queries/clientpositive/alter1.q
index 2fac195..767ab5c 100644
--- a/ql/src/test/queries/clientpositive/alter1.q
+++ b/ql/src/test/queries/clientpositive/alter1.q
@@ -21,8 +21,9 @@ describe extended alter1;
 
 alter table alter1 set serde 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe';
 describe extended alter1;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table alter1 replace columns (a int, b int, c string);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 describe alter1;
 
 -- Cleanup
@@ -61,8 +62,9 @@ DESCRIBE EXTENDED alter1_db.alter1;
 
 ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe';
 DESCRIBE EXTENDED alter1_db.alter1;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE alter1_db.alter1 REPLACE COLUMNS (a int, b int, c string);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE alter1_db.alter1;
 
 DROP TABLE alter1_db.alter1;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/avro_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_partitioned.q b/ql/src/test/queries/clientpositive/avro_partitioned.q
index a06e7c4..9e6c79a 100644
--- a/ql/src/test/queries/clientpositive/avro_partitioned.q
+++ b/ql/src/test/queries/clientpositive/avro_partitioned.q
@@ -112,7 +112,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat';
 
 -- Insert data into a partition
 INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Evolve the table schema by adding new array field "cast_and_crew"
 ALTER TABLE episodes_partitioned_serdeproperties
 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
@@ -144,5 +144,6 @@ WITH SERDEPROPERTIES ('avro.schema.literal'='{
   ]
 }');
 
+reset hive.metastore.disallow.incompatible.col.type.changes;
 -- Try selecting from the evolved table
 SELECT * FROM episodes_partitioned_serdeproperties;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q b/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
index 8d8cb6b..851a821 100644
--- a/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
+++ b/ql/src/test/queries/clientpositive/columnarserde_create_shortcut.q
@@ -22,5 +22,7 @@ SELECT * FROM columnShortcutTable;
 
 ALTER TABLE columnShortcutTable ADD COLUMNS (c string);
 SELECT * FROM columnShortcutTable;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE columnShortcutTable REPLACE COLUMNS (key int);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 SELECT * FROM columnShortcutTable;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/input3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input3.q b/ql/src/test/queries/clientpositive/input3.q
index 2efa7a4..1925fff 100644
--- a/ql/src/test/queries/clientpositive/input3.q
+++ b/ql/src/test/queries/clientpositive/input3.q
@@ -1,7 +1,3 @@
-
-
-
-
 CREATE TABLE TEST3a(A INT, B DOUBLE) STORED AS TEXTFILE; 
 DESCRIBE TEST3a; 
 CREATE TABLE TEST3b(A ARRAY<INT>, B DOUBLE, C MAP<DOUBLE, INT>) STORED AS TEXTFILE; 
@@ -16,11 +12,9 @@ ALTER TABLE TEST3b RENAME TO TEST3c;
 ALTER TABLE TEST3b RENAME TO TEST3c;
 DESCRIBE TEST3c; 
 SHOW TABLES;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 EXPLAIN
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
 ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE EXTENDED TEST3c;
-
-
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/lineage3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
index 70d4e57..d1fb454 100644
--- a/ql/src/test/queries/clientpositive/lineage3.q
+++ b/ql/src/test/queries/clientpositive/lineage3.q
@@ -1,5 +1,5 @@
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 drop table if exists d1;
 create table d1(a int);
 
@@ -202,3 +202,4 @@ insert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d wh
 insert into dest_dp2 partition (y=1, m) select f, w, m
 insert into dest_dp1 partition (year=0) select f, w;
 
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 4a805a0..c3e2cf9 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 create table if not exists alltypes (
  bo boolean,
  ti tinyint,
@@ -77,3 +78,4 @@ select * from src_part_orc limit 10;
 
 alter table src_part_orc change key key bigint;
 select * from src_part_orc limit 10;
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
index af0cf99..d2f2996 100644
--- a/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
+++ b/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
@@ -11,10 +11,10 @@ INSERT OVERWRITE TABLE NewStructField SELECT named_struct('a1', map('k1','v1'),
 
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 -- Adds new fields to the struct types
 ALTER TABLE NewStructField REPLACE COLUMNS (a struct<a1:map<string,string>, a2:struct<e1:int,e2:string>, a3:int>, b int);
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DESCRIBE NewStructField;
 SELECT * FROM NewStructField;
 
@@ -24,4 +24,4 @@ DESCRIBE NewStructFieldTable;
 SELECT * FROM NewStructFieldTable;
 
 DROP TABLE NewStructField;
-DROP TABLE NewStructFieldTable;
\ No newline at end of file
+DROP TABLE NewStructFieldTable;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
index 1a4291f..b2db2f1 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
@@ -7,9 +7,9 @@ insert overwrite table partition_test_partitioned partition(dt='1') select * fro
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
index bc51cb5..632d022 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
@@ -7,9 +7,9 @@ insert overwrite table partition_test_partitioned partition(dt='1') select * fro
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
index 2e4ae69..f124ec3 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
@@ -4,8 +4,9 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 create table T1(key string, value string) partitioned by (dt string) stored as rcfile;
 alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
 insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table T1 change key key int;
+reset hive.metastore.disallow.incompatible.col.type.changes;
 insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97;
 
 alter table T1 change key key string;
@@ -14,4 +15,4 @@ create table T2(key string, value string) partitioned by (dt string) stored as r
 insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97;
 
 select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
\ No newline at end of file
+select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
index 6fce1e0..70a454f 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
@@ -8,9 +8,9 @@ select * from src where key = 238;
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
index 37bb1a7..92757f6 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
@@ -8,9 +8,9 @@ select * from src where key = 238;
 
 select * from partition_test_partitioned where dt is not null;
 select key+key, value from partition_test_partitioned where dt is not null;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 alter table partition_test_partitioned change key key int;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 select key+key, value from partition_test_partitioned where dt is not null;
 select * from partition_test_partitioned where dt is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/queries/clientpositive/rename_column.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/rename_column.q b/ql/src/test/queries/clientpositive/rename_column.q
index a3f3f30..a211cfa 100644
--- a/ql/src/test/queries/clientpositive/rename_column.q
+++ b/ql/src/test/queries/clientpositive/rename_column.q
@@ -3,7 +3,7 @@ DESCRIBE kv_rename_test;
 
 ALTER TABLE kv_rename_test CHANGE a a STRING;
 DESCRIBE kv_rename_test;
-
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 ALTER TABLE kv_rename_test CHANGE a a1 INT;
 DESCRIBE kv_rename_test;
 
@@ -52,6 +52,6 @@ DESCRIBE kv_rename_test;
 
 ALTER TABLE kv_rename_test CHANGE COLUMN a2 a INT AFTER b;
 DESCRIBE kv_rename_test;
-
+reset hive.metastore.disallow.incompatible.col.type.changes;
 DROP TABLE kv_rename_test;
 SHOW TABLES;

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
index 96600eb..69b2b41 100644
--- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
+++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
@@ -103,9 +103,10 @@ POSTHOOK: Output: default@test_table123
 PREHOOK: query: -- All the above ALTERs will succeed since they are between compatible types.
 -- The following ALTER will fail as MAP<STRING, STRING> and STRING are not
 -- compatible.
+
 ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
-b
+a,b

http://git-wip-us.apache.org/repos/asf/hive/blob/e542f7f3/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index 24361c7..1d79880 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -23,6 +23,7 @@ import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.util.ArrayList;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
 
 /**
@@ -53,6 +55,25 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
  */
 public final class TypeInfoUtils {
 
+  public static List<PrimitiveCategory> numericTypeList = new ArrayList<PrimitiveCategory>();
+  // The ordering of types here is used to determine which numeric types
+  // are common/convertible to one another. Probably better to rely on the
+  // ordering explicitly defined here than to assume that the enum values
+  // that were arbitrarily assigned in PrimitiveCategory work for our purposes.
+  public static EnumMap<PrimitiveCategory, Integer> numericTypes =
+      new EnumMap<PrimitiveCategory, Integer>(PrimitiveCategory.class);
+
+  static {
+    registerNumericType(PrimitiveCategory.BYTE, 1);
+    registerNumericType(PrimitiveCategory.SHORT, 2);
+    registerNumericType(PrimitiveCategory.INT, 3);
+    registerNumericType(PrimitiveCategory.LONG, 4);
+    registerNumericType(PrimitiveCategory.FLOAT, 5);
+    registerNumericType(PrimitiveCategory.DOUBLE, 6);
+    registerNumericType(PrimitiveCategory.DECIMAL, 7);
+    registerNumericType(PrimitiveCategory.STRING, 8);
+  }
+
   private TypeInfoUtils() {
     // prevent instantiation
   }
@@ -266,7 +287,7 @@ public final class TypeInfoUtils {
      *
      * tokenize("map<int,string>") should return
      * ["map","<","int",",","string",">"]
-     * 
+     *
      * Note that we add '$' in new Calcite return path. As '$' will not appear
      * in any type in Hive, it is safe to do so.
      */
@@ -810,4 +831,76 @@ public final class TypeInfoUtils {
         return 0;
     }
   }
+
+  public static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
+    numericTypeList.add(primitiveCategory);
+    numericTypes.put(primitiveCategory, level);
+  }
+
+  public static boolean implicitConvertible(PrimitiveCategory from, PrimitiveCategory to) {
+    if (from == to) {
+      return true;
+    }
+
+    PrimitiveGrouping fromPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(from);
+    PrimitiveGrouping toPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(to);
+
+    // Allow implicit String to Double conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
+      return true;
+    }
+    // Allow implicit String to Decimal conversion
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
+      return true;
+    }
+    // Void can be converted to any type
+    if (from == PrimitiveCategory.VOID) {
+      return true;
+    }
+
+    // Allow implicit String to Date conversion
+    if (fromPg == PrimitiveGrouping.DATE_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit Numeric to String conversion
+    if (fromPg == PrimitiveGrouping.NUMERIC_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+    // Allow implicit String to varchar conversion, and vice versa
+    if (fromPg == PrimitiveGrouping.STRING_GROUP && toPg == PrimitiveGrouping.STRING_GROUP) {
+      return true;
+    }
+
+    // Allow implicit conversion from Byte -> Integer -> Long -> Float -> Double
+    // Decimal -> String
+    Integer f = numericTypes.get(from);
+    Integer t = numericTypes.get(to);
+    if (f == null || t == null) {
+      return false;
+    }
+    if (f.intValue() > t.intValue()) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns whether an object of type {@code from} can be implicitly
+   * converted to an object of type {@code to}.
+   */
+  public static boolean implicitConvertible(TypeInfo from, TypeInfo to) {
+    if (from.equals(to)) {
+      return true;
+    }
+
+    // Reimplemented to use PrimitiveCategory rather than TypeInfo, because
+    // 2 TypeInfos from the same qualified type (varchar, decimal) should still be
+    // seen as equivalent.
+    if (from.getCategory() == Category.PRIMITIVE && to.getCategory() == Category.PRIMITIVE) {
+      return implicitConvertible(
+          ((PrimitiveTypeInfo) from).getPrimitiveCategory(),
+          ((PrimitiveTypeInfo) to).getPrimitiveCategory());
+    }
+    return false;
+  }
 }
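
The ladder registered above is deliberately one-directional. A small sketch of that property, again assuming the patched class; the wrapper class name is illustrative:

import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class PromotionLadderSketch {
  public static void main(String[] args) {
    // Widening along BYTE -> SHORT -> INT -> LONG -> FLOAT -> DOUBLE ->
    // DECIMAL -> STRING is implicit...
    System.out.println(TypeInfoUtils.implicitConvertible(
        PrimitiveCategory.INT, PrimitiveCategory.LONG));   // true
    // ...but narrowing back down is not.
    System.out.println(TypeInfoUtils.implicitConvertible(
        PrimitiveCategory.DOUBLE, PrimitiveCategory.INT)); // false
    // VOID (a NULL literal) converts to anything.
    System.out.println(TypeInfoUtils.implicitConvertible(
        PrimitiveCategory.VOID, PrimitiveCategory.DATE));  // true
  }
}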


[17/55] [abbrv] hive git commit: HIVE-12235 : Improve beeline logging for dynamic service discovery (Szehon, reviewed by Vaibhav Gumashta)

Posted by jx...@apache.org.
HIVE-12235 : Improve beeline logging for dynamic service discovery (Szehon, reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/492a10f1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/492a10f1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/492a10f1

Branch: refs/heads/master-fixed
Commit: 492a10f101471226004b6f571d7f8c8a79103664
Parents: 902a548
Author: Szehon Ho <sz...@cloudera.com>
Authored: Mon Nov 2 16:38:03 2015 -0800
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Mon Nov 2 16:38:03 2015 -0800

----------------------------------------------------------------------
 beeline/src/main/resources/beeline-log4j2.xml   |  4 +++-
 .../org/apache/hive/jdbc/HiveConnection.java    | 21 ++++++++++++++------
 2 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/492a10f1/beeline/src/main/resources/beeline-log4j2.xml
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/beeline-log4j2.xml b/beeline/src/main/resources/beeline-log4j2.xml
index a64f55e..55ec0f5 100644
--- a/beeline/src/main/resources/beeline-log4j2.xml
+++ b/beeline/src/main/resources/beeline-log4j2.xml
@@ -34,6 +34,8 @@
     <Root level="${sys:hive.log.level}">
       <AppenderRef ref="${sys:hive.root.logger}"/>
     </Root>
+    <!-- HiveConnection logs useful info for dynamic service discovery -->
+    <logger name="org.apache.hive.jdbc.HiveConnection" level="INFO"/>
   </Loggers>
 
-</Configuration>
+</Configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/492a10f1/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index e38c585..f79d73d 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -204,16 +204,14 @@ public class HiveConnection implements java.sql.Connection {
                 .get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE));
         transport = isHttpTransportMode() ? createHttpTransport() : createBinaryTransport();
         if (!transport.isOpen()) {
-          LOG.info("Will try to open client transport with JDBC Uri: " + jdbcUriString);
           transport.open();
+          logZkDiscoveryMessage("Connected to " + connParams.getHost() + ":" + connParams.getPort());
         }
         break;
       } catch (TTransportException e) {
-        LOG.info("Could not open client transport with JDBC Uri: " + jdbcUriString);
         // We'll retry till we exhaust all HiveServer2 nodes from ZooKeeper
-        if ((sessConfMap.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE) != null)
-            && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(sessConfMap
-                .get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE)))) {
+        if (isZkDynamicDiscoveryMode()) {
+          LOG.info("Failed to connect to " + connParams.getHost() + ":" + connParams.getPort());
           try {
             // Update jdbcUriString, host & port variables in connParams
             // Throw an exception if all HiveServer2 nodes have been exhausted,
@@ -228,7 +226,6 @@ public class HiveConnection implements java.sql.Connection {
           jdbcUriString = connParams.getJdbcUriString();
           host = connParams.getHost();
           port = connParams.getPort();
-          LOG.info("Will retry opening client transport");
         } else {
           LOG.info("Transport Used for JDBC connection: " +
             sessConfMap.get(JdbcConnectionParams.TRANSPORT_MODE));
@@ -650,6 +647,18 @@ public class HiveConnection implements java.sql.Connection {
     return false;
   }
 
+  private boolean isZkDynamicDiscoveryMode() {
+    return (sessConfMap.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE) != null)
+      && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(sessConfMap
+      .get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE)));
+  }
+
+  private void logZkDiscoveryMessage(String message) {
+    if (isZkDynamicDiscoveryMode()) {
+      LOG.info(message);
+    }
+  }
+
   /**
    * Lookup varName in sessConfMap, if its null or empty return the default
    * value varDefault
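
This is not the Hive JDBC code itself, but a standalone sketch of the retry-and-log pattern the patch adopts: one concise line per outcome, retrying until the candidate list is exhausted. The hostnames are placeholders:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Arrays;
import java.util.List;

public class RetryAcrossNodesSketch {
  // Try each discovered node in turn, logging one concise line per outcome,
  // and fail only after every candidate has been exhausted.
  static Socket connect(List<InetSocketAddress> nodes) throws IOException {
    IOException last = new IOException("no nodes to try");
    for (InetSocketAddress node : nodes) {
      try {
        Socket s = new Socket(node.getHostString(), node.getPort());
        System.out.println("Connected to " + node);   // cf. logZkDiscoveryMessage
        return s;
      } catch (IOException e) {
        System.out.println("Failed to connect to " + node);
        last = e;
      }
    }
    throw last;
  }

  public static void main(String[] args) throws IOException {
    // Placeholder endpoints, standing in for hosts read from ZooKeeper.
    connect(Arrays.asList(
        new InetSocketAddress("hs2-node1.example.com", 10000),
        new InetSocketAddress("hs2-node2.example.com", 10000)));
  }
}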


[28/55] [abbrv] hive git commit: HIVE-12206: ClassNotFound Exception during query compilation with Tez and Union query and GenericUDFs (Jason Dere, reviewed by Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12206: ClassNotFound Exception during query compilation with Tez and Union query and GenericUDFs (Jason Dere, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fe6ebf77
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fe6ebf77
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fe6ebf77

Branch: refs/heads/master-fixed
Commit: fe6ebf77a5494b737479af3f1159e46ed6aa9d24
Parents: 7073ce3
Author: Jason Dere <jd...@hortonworks.com>
Authored: Tue Nov 3 15:38:31 2015 -0800
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Tue Nov 3 15:38:31 2015 -0800

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  1 +
 .../hive/udf/example/GenericUDFExampleAdd.java  | 48 ++++++++++++++++++++
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  2 +
 .../queries/clientpositive/tez_union_with_udf.q | 13 ++++++
 .../clientpositive/tez/tez_union_with_udf.q.out | 36 +++++++++++++++
 5 files changed, 100 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fe6ebf77/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 7416d32..2d1d274 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -424,6 +424,7 @@ minillap.query.files=bucket_map_join_tez1.q,\
   tez_union_view.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
+  tez_union_with_udf.q,\
   tez_smb_main.q,\
   tez_smb_1.q,\
   vectorized_dynamic_partition_pruning.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/fe6ebf77/itests/test-serde/src/main/java/org/apache/hadoop/hive/udf/example/GenericUDFExampleAdd.java
----------------------------------------------------------------------
diff --git a/itests/test-serde/src/main/java/org/apache/hadoop/hive/udf/example/GenericUDFExampleAdd.java b/itests/test-serde/src/main/java/org/apache/hadoop/hive/udf/example/GenericUDFExampleAdd.java
new file mode 100644
index 0000000..85906c2
--- /dev/null
+++ b/itests/test-serde/src/main/java/org/apache/hadoop/hive/udf/example/GenericUDFExampleAdd.java
@@ -0,0 +1,48 @@
+package org.apache.hadoop.hive.udf.example;
+
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+
+public class GenericUDFExampleAdd extends GenericUDF {
+
+  Converter converter0;
+  Converter converter1;
+  DoubleWritable result = new DoubleWritable();
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments)
+      throws UDFArgumentException {
+    ObjectInspector doubleOI = PrimitiveObjectInspectorFactory
+        .getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.DOUBLE);
+    converter0 = (Converter) ObjectInspectorConverters.getConverter(arguments[0], doubleOI);
+    converter1 = (Converter) ObjectInspectorConverters.getConverter(arguments[1], doubleOI);
+    return doubleOI;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    DoubleWritable dw0 = (DoubleWritable) converter0.convert(arguments[0].get());
+    DoubleWritable dw1 = (DoubleWritable) converter1.convert(arguments[1].get());
+    if (dw0 == null || dw1 == null) {
+      return null;
+    }
+    result.set(dw0.get() + dw1.get());
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return "GenericUDFExampleAdd";
+  }
+
+}
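
As a usage note: the UDF above can be driven directly through the GenericUDF
API, much as Hive's UDF unit tests do. A minimal sketch (the inspector and
deferred-object wiring below is ours, not part of the patch; 238 mirrors the
first src key echoed in the q.out file further down):

  import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
  import org.apache.hadoop.hive.serde2.io.DoubleWritable;
  import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
  import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
  import org.apache.hadoop.hive.udf.example.GenericUDFExampleAdd;

  public class GenericUDFExampleAddDemo {
    public static void main(String[] args) throws Exception {
      GenericUDFExampleAdd udf = new GenericUDFExampleAdd();
      ObjectInspector doubleOI =
          PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
      // initialize() builds the argument converters; evaluate() then adds.
      udf.initialize(new ObjectInspector[] { doubleOI, doubleOI });
      Object result = udf.evaluate(new GenericUDF.DeferredObject[] {
          new GenericUDF.DeferredJavaObject(new DoubleWritable(238)),
          new GenericUDF.DeferredJavaObject(new DoubleWritable(238)) });
      System.out.println(result);  // 476.0
    }
  }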

http://git-wip-us.apache.org/repos/asf/hive/blob/fe6ebf77/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 665b3f7..02adf0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -1057,6 +1057,7 @@ public final class Utilities {
    */
   private static void serializeObjectByKryo(Kryo kryo, Object plan, OutputStream out) {
     Output output = new Output(out);
+    kryo.setClassLoader(getSessionSpecifiedClassLoader());
     kryo.writeObject(output, plan);
     output.close();
   }
@@ -1080,6 +1081,7 @@ public final class Utilities {
 
   private static <T> T deserializeObjectByKryo(Kryo kryo, InputStream in, Class<T> clazz ) {
     Input inp = new Input(in);
+    kryo.setClassLoader(getSessionSpecifiedClassLoader());
     T t = kryo.readObject(inp,clazz);
     inp.close();
     return t;
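
The two setClassLoader() calls are the substance of the fix: by default Kryo
resolves class names through the loader that defined Kryo itself, which cannot
see jars registered via ADD JAR, so (de)serializing a query plan that
references a session-only class such as the UDF above fails with
ClassNotFoundException. A standalone sketch of the same placement, with the
thread context classloader standing in for Hive's session-specified loader:

  import com.esotericsoftware.kryo.Kryo;
  import com.esotericsoftware.kryo.io.Input;
  import com.esotericsoftware.kryo.io.Output;
  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.util.ArrayList;

  public class KryoClassLoaderSketch {
    public static void main(String[] args) {
      // In Hive this is getSessionSpecifiedClassLoader(), which carries the
      // ADD JAR urls; the context classloader stands in for it here.
      ClassLoader sessionLoader = Thread.currentThread().getContextClassLoader();

      Kryo kryo = new Kryo();
      kryo.setRegistrationRequired(false);
      // Set before every write/read, as in the patch, so class lookups go
      // through the session loader rather than Kryo's defining loader.
      kryo.setClassLoader(sessionLoader);

      ArrayList<String> plan = new ArrayList<>();
      plan.add("org.apache.hadoop.hive.udf.example.GenericUDFExampleAdd");

      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      Output out = new Output(bos);
      kryo.writeObject(out, plan);
      out.close();

      Input in = new Input(new ByteArrayInputStream(bos.toByteArray()));
      ArrayList<?> copy = kryo.readObject(in, ArrayList.class);
      in.close();
      System.out.println(copy);
    }
  }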

http://git-wip-us.apache.org/repos/asf/hive/blob/fe6ebf77/ql/src/test/queries/clientpositive/tez_union_with_udf.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_union_with_udf.q b/ql/src/test/queries/clientpositive/tez_union_with_udf.q
new file mode 100644
index 0000000..6826530
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/tez_union_with_udf.q
@@ -0,0 +1,13 @@
+select * from (select key + key from src limit 1) a
+union all
+select * from (select key + key from src limit 1) b;
+
+
+add jar ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
+
+create temporary function example_add as 'org.apache.hadoop.hive.udf.example.GenericUDFExampleAdd';
+
+-- Now try the query with the UDF
+select example_add(key, key) from (select key from src limit 1) a
+union all
+select example_add(key, key) from (select key from src limit 1) b;

http://git-wip-us.apache.org/repos/asf/hive/blob/fe6ebf77/ql/src/test/results/clientpositive/tez/tez_union_with_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_union_with_udf.q.out b/ql/src/test/results/clientpositive/tez/tez_union_with_udf.q.out
new file mode 100644
index 0000000..923e098
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/tez_union_with_udf.q.out
@@ -0,0 +1,36 @@
+PREHOOK: query: select * from (select key + key from src limit 1) a
+union all
+select * from (select key + key from src limit 1) b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key + key from src limit 1) a
+union all
+select * from (select key + key from src limit 1) b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+476.0
+476.0
+PREHOOK: query: create temporary function example_add as 'org.apache.hadoop.hive.udf.example.GenericUDFExampleAdd'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: example_add
+POSTHOOK: query: create temporary function example_add as 'org.apache.hadoop.hive.udf.example.GenericUDFExampleAdd'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: example_add
+PREHOOK: query: -- Now try the query with the UDF
+select example_add(key, key) from (select key from src limit 1) a
+union all
+select example_add(key, key) from (select key from src limit 1) b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- Now try the query with the UDF
+select example_add(key, key) from (select key from src limit 1) a
+union all
+select example_add(key, key) from (select key from src limit 1) b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+476.0
+476.0


[51/55] [abbrv] hive git commit: HIVE-12315: Fix Vectorized double divide by zero (Gopal V, reviewed by Matt McCline)

Posted by jx...@apache.org.
HIVE-12315: Fix Vectorized double divide by zero (Gopal V, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/973268bb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/973268bb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/973268bb

Branch: refs/heads/master-fixed
Commit: 973268bb88bbe1e30721580744dae1e668f6b944
Parents: e542f7f
Author: Gopal V <go...@apache.org>
Authored: Thu Nov 5 17:16:46 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Thu Nov 5 17:16:46 2015 -0800

----------------------------------------------------------------------
 .../ql/exec/vector/expressions/NullUtil.java    | 21 ++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/973268bb/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
index e4a9824..2eb48fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NullUtil.java
@@ -125,20 +125,21 @@ public class NullUtil {
   public static void setNullAndDivBy0DataEntriesDouble(
       DoubleColumnVector v, boolean selectedInUse, int[] sel, int n, LongColumnVector denoms) {
     assert v.isRepeating || !denoms.isRepeating;
+    final boolean realNulls = !v.noNulls;
     v.noNulls = false;
     long[] vector = denoms.vector;
-    if (v.isRepeating && (v.isNull[0] = (v.isNull[0] || vector[0] == 0))) {
+    if (v.isRepeating && (v.isNull[0] = ((realNulls && v.isNull[0]) || vector[0] == 0))) {
       v.vector[0] = DoubleColumnVector.NULL_VALUE;
     } else if (selectedInUse) {
       for (int j = 0; j != n; j++) {
         int i = sel[j];
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
     } else {
       for (int i = 0; i != n; i++) {
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
@@ -152,20 +153,21 @@ public class NullUtil {
   public static void setNullAndDivBy0DataEntriesDouble(
       DoubleColumnVector v, boolean selectedInUse, int[] sel, int n, DoubleColumnVector denoms) {
     assert v.isRepeating || !denoms.isRepeating;
+    final boolean realNulls = !v.noNulls;
     v.noNulls = false;
     double[] vector = denoms.vector;
-    if (v.isRepeating && (v.isNull[0] = (v.isNull[0] || vector[0] == 0))) {
+    if (v.isRepeating && (v.isNull[0] = ((realNulls && v.isNull[0]) || vector[0] == 0))) {
       v.vector[0] = DoubleColumnVector.NULL_VALUE;
     } else if (selectedInUse) {
       for (int j = 0; j != n; j++) {
         int i = sel[j];
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
     } else {
       for (int i = 0; i != n; i++) {
-        if (v.isNull[i] = (v.isNull[i] || vector[i] == 0)) {
+        if (v.isNull[i] = ((realNulls && v.isNull[i]) || vector[i] == 0)) {
           v.vector[i] = DoubleColumnVector.NULL_VALUE;
         }
       }
@@ -235,6 +237,13 @@ public class NullUtil {
 
     outputColVector.noNulls = inputColVector1.noNulls && inputColVector2.noNulls;
 
+    if (outputColVector.noNulls) {
+      // the inputs might not always have isNull initialized for
+      // inputColVector1.isNull[i] || inputColVector2.isNull[i] to be valid
+      Arrays.fill(outputColVector.isNull, false);
+      return;
+    }
+
     if (inputColVector1.noNulls && !inputColVector2.noNulls) {
       if (inputColVector2.isRepeating) {
         outputColVector.isNull[0] = inputColVector2.isNull[0];


[19/55] [abbrv] hive git commit: HIVE-12281: Vectorized MapJoin - use Operator::isLogDebugEnabled (Gopal V, reviewed by Matt McCline)

Posted by jx...@apache.org.
HIVE-12281: Vectorized MapJoin - use Operator::isLogDebugEnabled (Gopal V, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ad127657
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ad127657
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ad127657

Branch: refs/heads/master-fixed
Commit: ad12765714c22c056aecd7878ce05a1f3e28a038
Parents: db2c500
Author: Gopal V <go...@apache.org>
Authored: Mon Nov 2 19:50:42 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Mon Nov 2 19:50:42 2015 -0800

----------------------------------------------------------------------
 .../vector/mapjoin/VectorMapJoinCommonOperator.java   | 14 +++++++-------
 .../mapjoin/VectorMapJoinGenerateResultOperator.java  |  8 ++++----
 .../VectorMapJoinInnerBigOnlyLongOperator.java        |  8 ++++----
 .../VectorMapJoinInnerBigOnlyMultiKeyOperator.java    |  8 ++++----
 .../VectorMapJoinInnerBigOnlyStringOperator.java      |  8 ++++----
 .../mapjoin/VectorMapJoinInnerLongOperator.java       |  8 ++++----
 .../mapjoin/VectorMapJoinInnerMultiKeyOperator.java   |  8 ++++----
 .../mapjoin/VectorMapJoinInnerStringOperator.java     |  8 ++++----
 .../mapjoin/VectorMapJoinLeftSemiLongOperator.java    |  8 ++++----
 .../VectorMapJoinLeftSemiMultiKeyOperator.java        |  8 ++++----
 .../mapjoin/VectorMapJoinLeftSemiStringOperator.java  |  8 ++++----
 .../VectorMapJoinOuterGenerateResultOperator.java     | 12 ++++++------
 .../mapjoin/VectorMapJoinOuterLongOperator.java       | 10 +++++-----
 .../mapjoin/VectorMapJoinOuterMultiKeyOperator.java   | 10 +++++-----
 .../mapjoin/VectorMapJoinOuterStringOperator.java     | 10 +++++-----
 .../mapjoin/fast/VectorMapJoinFastBytesHashTable.java |  6 ++++--
 .../mapjoin/fast/VectorMapJoinFastLongHashTable.java  |  6 ++++--
 17 files changed, 76 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index 435b438..1667bf7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -445,7 +445,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
     outputProjection = projectionMapping.getOutputColumns();
     outputTypeNames = projectionMapping.getTypeNames();
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       int[] orderDisplayable = new int[order.length];
       for (int i = 0; i < order.length; i++) {
         orderDisplayable[i] = (int) order[i];
@@ -507,7 +507,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
    * columns and new scratch columns.
    */
   protected void setupVOutContext(List<String> outputColumnNames) {
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames);
     }
     if (outputColumnNames.size() != outputProjection.length) {
@@ -519,7 +519,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
       int outputColumn = outputProjection[i];
       vOutContext.addProjectionColumn(columnName, outputColumn);
 
-      if (LOG.isDebugEnabled()) {
+      if (isLogDebugEnabled) {
         LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn);
       }
     }
@@ -552,7 +552,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
   protected void initializeOp(Configuration hconf) throws HiveException {
     super.initializeOp(hconf);
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       // Determine the name of our map or reduce task for debug tracing.
       BaseWork work = Utilities.getMapWork(hconf);
       if (work == null) {
@@ -599,7 +599,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
     needCommonSetup = true;
     needHashTableSetup = true;
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       int[] currentScratchColumns = vOutContext.currentScratchColumns();
       LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns));
 
@@ -736,7 +736,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
 
       overflowBatch.cols[outputColumn] = VectorizedRowBatchCtx.allocateColumnVector(columnVectorTypeName, VectorizedRowBatch.DEFAULT_SIZE);
 
-      if (LOG.isDebugEnabled()) {
+      if (isLogDebugEnabled) {
         LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName());
       }
     }
@@ -747,7 +747,7 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
    */
   protected void commonSetup(VectorizedRowBatch batch) throws HiveException {
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug("VectorMapJoinInnerCommonOperator commonSetup begin...");
       displayBatchColumns(batch, "batch");
       displayBatchColumns(overflowBatch, "overflowBatch");

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index 4e2bd7b..b20cca4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -517,7 +517,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
         smallTable);
     needHashTableSetup = true;
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug(CLASS_NAME + " reloadHashTable!");
     }
   }
@@ -526,7 +526,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
   protected void reProcessBigTable(int partitionId)
       throws HiveException {
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug(CLASS_NAME + " reProcessBigTable enter...");
     }
 
@@ -577,7 +577,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
       throw new HiveException(e);
     }
 
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug(CLASS_NAME + " reProcessBigTable exit! " + rowCount + " row processed and " + batchCount + " batches processed");
     }
   }
@@ -641,7 +641,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
     if (!aborted && overflowBatch.size > 0) {
       forwardOverflow();
     }
-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug("VectorMapJoinInnerLongOperator closeOp " + batchCounter + " batches processed");
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
index 7517802..e8b722e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
@@ -138,7 +138,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -192,7 +192,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]);
@@ -202,7 +202,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -344,7 +344,7 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
index 02a3746..e016013 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
@@ -143,7 +143,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -208,7 +208,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]);
@@ -218,7 +218,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -365,7 +365,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
index 7c27b44..c07d353 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
@@ -129,7 +129,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -184,7 +184,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]);
@@ -194,7 +194,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -336,7 +336,7 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
index 4e31a10..92d7328 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
@@ -136,7 +136,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -190,7 +190,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerRepeated(batch, joinResult, hashMapResults[0]);
@@ -200,7 +200,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -341,7 +341,7 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
index 6b63200..eb78174 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
@@ -140,7 +140,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -205,7 +205,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerRepeated(batch, joinResult, hashMapResults[0]);
@@ -215,7 +215,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -361,7 +361,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
index 93331aa..4b508d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
@@ -127,7 +127,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -179,7 +179,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishInnerRepeated(batch, joinResult, hashMapResults[0]);
@@ -189,7 +189,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -330,7 +330,7 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
index 9f6a822..762b6fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
@@ -138,7 +138,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -192,7 +192,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
@@ -202,7 +202,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -337,7 +337,7 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " spills " + intArrayToRangesString(spills, spillCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
index f03bf6f..a7a51f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
@@ -142,7 +142,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -208,7 +208,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
@@ -218,7 +218,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -361,7 +361,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " spills " + intArrayToRangesString(spills, spillCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
index ef525d9..eaa3af4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
@@ -129,7 +129,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -181,7 +181,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]);
@@ -191,7 +191,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -326,7 +326,7 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME +
               " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) +
               " spills " + intArrayToRangesString(spills, spillCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
index 25aa941..5a88784 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java
@@ -436,7 +436,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
       int nonSpillCount = subtractFromInputSelected(
               inputSelectedInUse, inputLogicalSize, spills, spillCount, nonSpills);
 
-      if (LOG.isDebugEnabled()) {
+      if (isLogDebugEnabled) {
         LOG.debug("finishOuter spillCount > 0" +
             " nonSpills " + intArrayToRangesString(nonSpills, nonSpillCount));
       }
@@ -452,7 +452,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         noMatchCount = subtract(nonSpills, nonSpillCount, allMatchs, allMatchCount,
                 noMatchs);
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug("finishOuter spillCount > 0" +
               " noMatchs " + intArrayToRangesString(noMatchs, noMatchCount));
         }
@@ -467,7 +467,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         noMatchCount = subtractFromInputSelected(
             inputSelectedInUse, inputLogicalSize, allMatchs, allMatchCount, noMatchs);
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug("finishOuter spillCount == 0" +
               " noMatchs " + intArrayToRangesString(noMatchs, noMatchCount));
         }
@@ -501,7 +501,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
       batch.size = numSel;
       batch.selectedInUse = true;
 
-      if (LOG.isDebugEnabled()) {
+      if (isLogDebugEnabled) {
         LOG.debug("finishOuter allMatchCount > 0" +
             " batch.selected " + intArrayToRangesString(batch.selected, batch.size));
       }
@@ -519,7 +519,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         int mergeCount = sortMerge(
                 noMatchs, noMatchCount, batch.selected, batch.size, merged);
     
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug("finishOuter noMatchCount > 0 && batch.size > 0" +
               " merged " + intArrayToRangesString(merged, mergeCount));
         }
@@ -537,7 +537,7 @@ public abstract class VectorMapJoinOuterGenerateResultOperator
         batch.size = noMatchCount;
         batch.selectedInUse = true;
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug("finishOuter noMatchCount > 0 && batch.size == 0" +
               " batch.selected " + intArrayToRangesString(batch.selected, batch.size));
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
index 355676a..5b687fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
@@ -128,7 +128,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -157,7 +157,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
           ve.evaluate(batch);
         }
         someRowsFilteredOut = (batch.size != inputLogicalSize);
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           if (batch.selectedInUse) {
             if (inputSelectedInUse) {
               LOG.debug(CLASS_NAME +
@@ -230,7 +230,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut,
@@ -241,7 +241,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -411,7 +411,7 @@ public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateRe
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
index 2c98a24..e212a2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
@@ -131,7 +131,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -160,7 +160,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
           ve.evaluate(batch);
         }
         someRowsFilteredOut = (batch.size != inputLogicalSize);
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           if (batch.selectedInUse) {
             if (inputSelectedInUse) {
               LOG.debug(CLASS_NAME +
@@ -247,7 +247,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut,
@@ -258,7 +258,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -427,7 +427,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
index 24496d9..e4107ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
@@ -118,7 +118,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
       final int inputLogicalSize = batch.size;
 
       if (inputLogicalSize == 0) {
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty");
         }
         return;
@@ -147,7 +147,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
           ve.evaluate(batch);
         }
         someRowsFilteredOut = (batch.size != inputLogicalSize);
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           if (batch.selectedInUse) {
             if (inputSelectedInUse) {
               LOG.debug(CLASS_NAME +
@@ -218,7 +218,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
          * Common repeated join result processing.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
         }
         finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut,
@@ -229,7 +229,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
          * NOT Repeating.
          */
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
         }
 
@@ -397,7 +397,7 @@ public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerate
           }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (isLogDebugEnabled) {
           LOG.debug(CLASS_NAME + " batch #" + batchCounter +
               " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) +
               " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) +

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
index b978bf0..6b536f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java
@@ -38,6 +38,8 @@ public abstract class VectorMapJoinFastBytesHashTable
 
   private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastBytesHashTable.class);
 
+  private final boolean isLogDebugEnabled = LOG.isDebugEnabled();
+
   protected VectorMapJoinFastKeyStore keyStore;
 
   private BytesWritable testKeyBytesWritable;
@@ -98,7 +100,7 @@ public abstract class VectorMapJoinFastBytesHashTable
     }
 
     if (largestNumberOfSteps < i) {
-      if (LOG.isDebugEnabled()) {
+      if (isLogDebugEnabled) {
         LOG.debug("Probed " + i + " slots (the longest so far) to find space");
       }
       largestNumberOfSteps = i;
@@ -148,7 +150,7 @@ public abstract class VectorMapJoinFastBytesHashTable
         }
 
         if (newLargestNumberOfSteps < i) {
-          if (LOG.isDebugEnabled()) {
+          if (isLogDebugEnabled) {
             LOG.debug("Probed " + i + " slots (the longest so far) to find space");
           }
           newLargestNumberOfSteps = i;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad127657/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
index 7ea3455..dfc9bf1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
@@ -46,6 +46,8 @@ public abstract class VectorMapJoinFastLongHashTable
 
   public static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastLongHashTable.class);
 
+  private transient final boolean isLogDebugEnabled = LOG.isDebugEnabled();
+
   private HashTableKeyType hashTableKeyType;
 
   private boolean isOuterJoin;
@@ -139,7 +141,7 @@ public abstract class VectorMapJoinFastLongHashTable
     }
 
     if (largestNumberOfSteps < i) {
-      if (LOG.isDebugEnabled()) {
+      if (isLogDebugEnabled) {
         LOG.debug("Probed " + i + " slots (the longest so far) to find space");
       }
       largestNumberOfSteps = i;
@@ -199,7 +201,7 @@ public abstract class VectorMapJoinFastLongHashTable
         }
 
         if (newLargestNumberOfSteps < i) {
-          if (LOG.isDebugEnabled()) {
+          if (isLogDebugEnabled) {
             LOG.debug("Probed " + i + " slots (the longest so far) to find space");
           }
           newLargestNumberOfSteps = i;
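
All 17 files apply the same pattern: snapshot LOG.isDebugEnabled() into a
field once per operator or hash-table instance instead of re-asking the
logger inside per-batch and per-probe loops. A minimal sketch of the pattern
and its trade-off (a log level changed at runtime is not seen by an instance
that has already cached it):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class HotLoopLogging {
    private static final Logger LOG = LoggerFactory.getLogger(HotLoopLogging.class);

    // Evaluated once at construction; cheap to test in a tight loop.
    private final boolean isLogDebugEnabled = LOG.isDebugEnabled();

    void processBatch(long[] rows) {
      for (int i = 0; i < rows.length; i++) {
        // ... per-row work ...
        if (isLogDebugEnabled) {
          LOG.debug("row " + i + " value " + rows[i]);
        }
      }
    }
  }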


[31/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index 04eaaa1..bcf5944 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -117,14 +117,14 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	6	6984454.211097692	-4033.445769230769	6967702.8672438458471
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.746153846154	-11712.276923076923	12625.04759999997746
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	3	6984454.211097692	-617.5607769230769	6983219.0895438458462
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2	6984454.211097692	1833.9456923076925	6986288.1567899996925
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
 EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
@@ -221,11 +221,11 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.211097692	-4033.445769230769	6967702.8672438458471	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	-1941.6364148649	1426.0153418918999	2016.6902366556308	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459	-2325.50327307692295	1707.9424961538462	2415.395441814127
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.746153846154	-11712.276923076923	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.211097692	-617.5607769230769	6983219.0895438458462	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2916536.7443268917	2915005.5249214866	4122440.3477364695	2	6984454.211097692	1833.9456923076925	6986288.1567899996925	3493144.07839499984625	3491310.1327026924	4937458.140118758
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536	1956.576923076922966667	6821.495748565159	6822.606289190924
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	-1941.63641486490000	1426.0153418918999	2016.6902366556308	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590	-2325.503273076922950000	1707.9424961538462	2415.395441814127
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2916536.74432689170000	2915005.5249214866	4122440.3477364695	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250	3493144.078394999846250000	3491310.1327026924	4937458.140118758
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360	1956.576923076922966667	6821.495748565159	6822.606289190924

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
index e9fc3f8..d138102 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
@@ -161,109 +161,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sum_expr_with_order.q.out b/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
index 00318e8..5e00930 100644
--- a/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
+++ b/ql/src/test/results/clientpositive/sum_expr_with_order.q.out
@@ -12,4 +12,4 @@ order by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-13009100
+13009100.000

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
index 4b39b2c..69fab90 100644
--- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
@@ -1290,105 +1290,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1478,105 +1478,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: DROP TABLE decimal_mapjoin
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_mapjoin

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
index 7ca537d..4b15062 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
@@ -166,112 +166,112 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -282,109 +282,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/update_all_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/update_all_types.q.out b/ql/src/test/results/clientpositive/tez/update_all_types.q.out
index 1cfa088..c5c1abb 100644
--- a/ql/src/test/results/clientpositive/tez/update_all_types.q.out
+++ b/ql/src/test/results/clientpositive/tez/update_all_types.q.out
@@ -96,11 +96,11 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-741	-1070883071	-1070883071	-1645852809	NULL	-741.0	NULL	NULL	1969-12-31	0ruyd6Y50JpdGRf6HqD	0ruyd6Y50JpdGRf6HqD	xH7445Rals48VOulSyR5F               	false
@@ -150,12 +150,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false
@@ -184,12 +184,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
index d6a8517..0be71b1 100644
--- a/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
@@ -170,4 +170,4 @@ select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2korc
 #### A masked pattern was here ####
--4997414117561.546875	4994550248722.298828	-10252745435816.02441	-5399023399.587163986308583465
+-4997414117561.546875000000000000	4994550248722.298828000000000000	-10252745435816.024410000000000000	-5399023399.587163986308583465

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
index 1d720c0..11c3d71 100644
--- a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
@@ -650,34 +650,34 @@ POSTHOOK: Input: default@decimal_date_test
 -18.5162162162
 -17.3216216216
 -16.7243243243
--16.127027027
+-16.1270270270
 -15.5297297297
 -10.7513513514
 -9.5567567568
 -8.3621621622
--5.972972973
+-5.9729729730
 -3.5837837838
 4.1810810811
 4.7783783784
 4.7783783784
 5.3756756757
-5.972972973
-5.972972973
+5.9729729730
+5.9729729730
 11.3486486486
 11.3486486486
 11.9459459459
 14.9324324324
 19.1135135135
 20.3081081081
-22.1
+22.1000000000
 24.4891891892
 33.4486486486
 34.6432432432
 40.0189189189
 42.4081081081
 43.0054054054
-44.2
-44.2
+44.2000000000
+44.2000000000
 44.7972972973
 45.9918918919
 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
index 331edd0..12920d2 100644
--- a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
@@ -204,13 +204,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
index 9474c2c..8a21697 100644
--- a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
@@ -159,7 +159,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education
@@ -252,7 +252,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
index fc37e0d..3de006c 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
@@ -1184,7 +1184,7 @@ POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1355944339.1234567
+1355944339.12345670
 PREHOOK: query: explain
 select cast(true as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1588,7 +1588,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1
+1.0000000000000000000
 PREHOOK: query: explain
 select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 PREHOOK: type: QUERY
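
The decimal_2 results above also show what happens when the source value carries more fractional digits than the target type: the cast to decimal(20,19) rounds first, and the rounded value is then padded to scale 19. A sketch of that two-step effect with BigDecimal; the HALF_UP rounding mode is an assumption here, not necessarily the mode Hive applies:

  import java.math.BigDecimal;
  import java.math.RoundingMode;

  public class DecimalCastExample {
    public static void main(String[] args) {
      // 0.99999999999999999999 has 20 fractional digits; reducing it to
      // scale 19 forces a rounding step, and the rounded value is then
      // rendered at the full target scale.
      BigDecimal v = new BigDecimal("0.99999999999999999999");
      System.out.println(v.setScale(19, RoundingMode.HALF_UP).toPlainString());
      // -> 1.0000000000000000000
    }
  }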

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
index 75f872e..eea91bb 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
@@ -47,43 +47,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -92,43 +92,43 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-1234567890.12345678	1234567890
-200	200
-125.2	125
-124	124
-100	100
-20	20
-10	10
-3.14	4
-3.14	3
-3.14	3
-3.14	3
-2	2
-2	2
-1.122	1
-1.12	1
-1	1
-1	1
-1	1
-0.333	0
-0.33	0
-0.3	0
-0.2	0
-0.1	0
-0.02	0
-0.01	0
-0	0
-0	0
-0	0
--0.3	0
--0.33	0
--0.333	0
--1.12	-1
--1.12	-1
--1.122	-11
--1255.49	-1255
--4400	4400
--1234567890.123456789	-1234567890
+1234567890.123456780000000000	1234567890
+200.000000000000000000	200
+125.200000000000000000	125
+124.000000000000000000	124
+100.000000000000000000	100
+20.000000000000000000	20
+10.000000000000000000	10
+3.140000000000000000	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+2.000000000000000000	2
+2.000000000000000000	2
+1.122000000000000000	1
+1.120000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+0.333000000000000000	0
+0.330000000000000000	0
+0.300000000000000000	0
+0.200000000000000000	0
+0.100000000000000000	0
+0.020000000000000000	0
+0.010000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+-0.300000000000000000	0
+-0.330000000000000000	0
+-0.333000000000000000	0
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-1.122000000000000000	-11
+-1255.490000000000000000	-1255
+-4400.000000000000000000	4400
+-1234567890.123456789000000000	-1234567890
 NULL	0
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -139,43 +139,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -185,34 +185,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL
--1234567890.123456789
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
-1234567890.12345678
+-1234567890.123456789000000000
+-4400.000000000000000000
+-1255.490000000000000000
+-1.122000000000000000
+-1.120000000000000000
+-0.333000000000000000
+-0.330000000000000000
+-0.300000000000000000
+0.000000000000000000
+0.010000000000000000
+0.020000000000000000
+0.100000000000000000
+0.200000000000000000
+0.300000000000000000
+0.330000000000000000
+0.333000000000000000
+1.000000000000000000
+1.120000000000000000
+1.122000000000000000
+2.000000000000000000
+3.140000000000000000
+10.000000000000000000
+20.000000000000000000
+100.000000000000000000
+124.000000000000000000
+125.200000000000000000
+200.000000000000000000
+1234567890.123456780000000000
 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -222,34 +222,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-2
--0.333	0
--0.33	0
--0.3	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	3
-1.12	1
-1.122	1
-2	4
-3.14	13
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-2
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	3
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	4
+3.140000000000000000	13
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -258,23 +258,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789
--1255	-1255.49
--11	-1.122
--1	-2.24
-0	0.33
-1	5.242
-2	4
-3	9.42
-4	3.14
-10	10
-20	20
-100	100
-124	124
-125	125.2
-200	200
-4400	-4400
-1234567890	1234567890.12345678
+-1234567890	-1234567890.123456789000000000
+-1255	-1255.490000000000000000
+-11	-1.122000000000000000
+-1	-2.240000000000000000
+0	0.330000000000000000
+1	5.242000000000000000
+2	4.000000000000000000
+3	9.420000000000000000
+4	3.140000000000000000
+10	10.000000000000000000
+20	20.000000000000000000
+100	100.000000000000000000
+124	124.000000000000000000
+125	125.200000000000000000
+200	200.000000000000000000
+4400	-4400.000000000000000000
+1234567890	1234567890.123456780000000000
 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -283,71 +283,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -356,10 +356,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -368,10 +368,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: DROP TABLE DECIMAL_3_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3_txt
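
Note that the padding is purely a rendering change: the two predicates above, key=3.14 and key=3.140, still select the same rows, because decimal comparison is by numeric value rather than by printed scale. The analogous distinction in plain Java, again with BigDecimal standing in for Hive's decimal:

  import java.math.BigDecimal;

  public class DecimalCompareExample {
    public static void main(String[] args) {
      BigDecimal a = new BigDecimal("3.14");
      BigDecimal b = new BigDecimal("3.140000000000000000");
      // compareTo ignores scale, so padded and unpadded forms are equal:
      System.out.println(a.compareTo(b) == 0);  // true
      // equals() also compares the scale, which is why it distinguishes them:
      System.out.println(a.equals(b));          // false
    }
  }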

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
index 613f5a8..c7d3d9e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out
@@ -57,43 +57,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
+-1234567890.1234567890000000000000000	-1234567890
+-4400.0000000000000000000000000	4400
+-1255.4900000000000000000000000	-1255
+-1.1220000000000000000000000	-11
+-1.1200000000000000000000000	-1
+-1.1200000000000000000000000	-1
+-0.3330000000000000000000000	0
+-0.3300000000000000000000000	0
+-0.3000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0100000000000000000000000	0
+0.0200000000000000000000000	0
+0.1000000000000000000000000	0
+0.2000000000000000000000000	0
+0.3000000000000000000000000	0
+0.3300000000000000000000000	0
+0.3330000000000000000000000	0
 0.9999999999999999999999999	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+1.0000000000000000000000000	1
+1.0000000000000000000000000	1
+1.1200000000000000000000000	1
+1.1220000000000000000000000	1
+2.0000000000000000000000000	2
+2.0000000000000000000000000	2
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	4
+10.0000000000000000000000000	10
+20.0000000000000000000000000	20
+100.0000000000000000000000000	100
+124.0000000000000000000000000	124
+125.2000000000000000000000000	125
+200.0000000000000000000000000	200
+1234567890.1234567800000000000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -103,43 +103,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -149,43 +149,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -195,43 +195,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: DROP TABLE DECIMAL_4_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_4_1

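A pattern worth noting in the expected-output hunks above: each decimal value now prints with exactly as many fractional digits as the column's declared scale, where the old output trimmed trailing zeros. A minimal sketch, assuming DECIMAL_4_2's columns are decimal(35,25) — inferred from the 25 fractional digits in the padded output; the table DDL is not shown in this excerpt:

  -- Hypothetical reproduction; the column type is an inference, not taken from the patch.
  SELECT key, value FROM DECIMAL_4_2 ORDER BY key LIMIT 1;
  -- old display: -1234567890.123456789	-3703703670.370370367
  -- new display: -1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000

The stored values are identical in both cases; only the rendering changes.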
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
index 34c3351..0bfd12e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out
@@ -59,41 +59,41 @@ POSTHOOK: Input: default@decimal_5
 NULL
 NULL
 NULL
--4400
--1255.49
--1.122
--1.12
--1.12
--0.333
--0.33
--0.3
-0
-0
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1
-1
-1.12
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.00000
+1.00000
+1.12000
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -103,32 +103,32 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.12000
+1.12200
+2.00000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -185,40 +185,40 @@ POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
+0.000
+0.000
+100.000
+10.000
+1.000
+0.100
+0.010
+200.000
+20.000
+2.000
+0.000
+0.200
+0.020
+0.300
+0.330
 0.333
--0.3
--0.33
+-0.300
+-0.330
 -0.333
-1
-2
-3.14
--1.12
--1.12
+1.000
+2.000
+3.140
+-1.120
+-1.120
 -1.122
-1.12
+1.120
 1.122
-124
-125.2
+124.000
+125.200
 NULL
-3.14
-3.14
-3.14
-1
+3.140
+3.140
+3.140
+1.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
index 9cdd7fc..e0ccbc6 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
@@ -119,27 +119,27 @@ NULL	0
 NULL	3
 NULL	4
 NULL	1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.00000	4400
+-1255.49000	-1255
+-1.12200	-11
+-1.12000	-1
+-0.33300	0
+-0.30000	0
+0.00000	0
+0.00000	0
+0.33300	0
+1.00000	1
+1.00000	1
+1.12000	1
+1.12200	1
+2.00000	2
+3.14000	3
+3.14000	3
+3.14000	4
+10.00000	10
 10.73433	5
-124	124
-125.2	125
+124.00000	124
+125.20000	125
 23232.23435	2
 PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -151,27 +151,27 @@ POSTHOOK: Input: default@decimal_6_2
 #### A masked pattern was here ####
 NULL	0
 -1234567890.1235	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.0000	4400
+-1255.4900	-1255
+-1.1220	-11
+-1.1200	-1
+-0.3330	0
+-0.3000	0
+0.0000	0
+0.0000	0
+0.3330	0
+1.0000	1
+1.0000	1
+1.1200	1
+1.1220	1
+2.0000	2
+3.1400	3
+3.1400	3
+3.1400	4
+10.0000	10
 10.7343	5
-124	124
-125.2	125
+124.0000	124
+125.2000	125
 23232.2344	2
 2389432.2375	3
 2389432.2375	4
@@ -200,54 +200,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.1235
--4400
--4400
--1255.49
--1255.49
--1.122
--1.122
--1.12
--1.12
--0.333
--0.333
--0.3
--0.3
-0
-0
-0
-0
-0.333
-0.333
-1
-1
-1
-1
-1.12
-1.12
-1.122
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-3.14
-3.14
-10
-10
-10.7343
+-1234567890.12350
+-4400.00000
+-4400.00000
+-1255.49000
+-1255.49000
+-1.12200
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33300
+-0.30000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.00000
+0.33300
+0.33300
+1.00000
+1.00000
+1.00000
+1.00000
+1.12000
+1.12000
+1.12200
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+10.00000
+10.73430
 10.73433
-124
-124
-125.2
-125.2
+124.00000
+124.00000
+125.20000
+125.20000
 23232.23435
-23232.2344
-2389432.2375
-2389432.2375
-1234567890.1235
+23232.23440
+2389432.23750
+2389432.23750
+1234567890.12350
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@decimal_6_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
index 683af86..d10f053 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
@@ -117,14 +117,14 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	6	6984454.211097692	-4033.445769230769	6967702.8672438458471
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.746153846154	-11712.276923076923	12625.04759999997746
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	3	6984454.211097692	-617.5607769230769	6983219.0895438458462
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2	6984454.211097692	1833.9456923076925	6986288.1567899996925
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
 EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
@@ -221,11 +221,11 @@ POSTHOOK: query: SELECT cint,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_vgby
 #### A masked pattern was here ####
--3728	6	5831542.269248378	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.211097692	-4033.445769230769	6967702.8672438458471	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
--563	2	-515.621072973	-3367.6517567568	-3883.2728297298	-1941.6364148649	1426.0153418918999	2016.6902366556308	2	-617.5607769230769	-4033.445769230769	-4651.0065461538459	-2325.50327307692295	1707.9424961538462	2415.395441814127
-253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.969230769231	-11712.99230769231	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
-528534767	1024	5831542.269248378	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.211097692	-11710.130769230771	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
-626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.746153846154	-11712.276923076923	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
-6981	3	5831542.269248378	-515.621072973	5830511.027102432	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.211097692	-617.5607769230769	6983219.0895438458462	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
-762	2	5831542.269248378	1531.2194054054	5833073.4886537834	2916536.7443268917	2915005.5249214866	4122440.3477364695	2	6984454.211097692	1833.9456923076925	6986288.1567899996925	3493144.07839499984625	3491310.1327026924	4937458.140118758
-NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.715384615385	-5147.907692307693	6010604.3076923073536	1956.576923076922966667	6821.495748565159	6822.606289190924
+-3728	6	5831542.2692483780	-3367.6517567568	5817556.0411483778	969592.67352472963333	2174330.2092403853	2381859.406131774	6	6984454.21109769200000	-4033.44576923076900	6967702.86724384584710	1161283.811207307641183333	2604201.2704476737	2852759.5602156054
+-563	2	-515.6210729730	-3367.6517567568	-3883.2728297298	-1941.63641486490000	1426.0153418918999	2016.6902366556308	2	-617.56077692307690	-4033.44576923076900	-4651.00654615384590	-2325.503273076922950000	1707.9424961538462	2415.395441814127
+253665376	1024	9767.0054054054	-9779.5486486487	-347484.0818378374	-339.33992366976309	5708.9563478862	5711.745967572779	1024	11697.96923076923100	-11712.99230769231000	-416182.64030769233089	-406.428359675480791885	6837.632716002934	6840.973851172274
+528534767	1024	5831542.2692483780	-9777.1594594595	11646372.8607481068	11373.41099682432305	257528.92988206653	257654.7686043977	1024	6984454.21109769200000	-11710.13076923077100	13948892.79980307629003	13621.965624807691689482	308443.1074570801	308593.82484083984
+626923679	1024	9723.4027027027	-9778.9513513514	10541.0525297287	10.29399661106318	5742.09145323734	5744.897264034267	1024	11645.74615384615400	-11712.27692307692300	12625.04759999997746	12.329148046874977988	6877.318722794877	6880.679250101603
+6981	3	5831542.2692483780	-515.6210729730	5830511.0271024320	1943503.67570081066667	2749258.455012492	3367140.1929065133	3	6984454.21109769200000	-617.56077692307690	6983219.08954384584620	2327739.696514615282066667	3292794.4113115156	4032833.0678006653
+762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2916536.74432689170000	2915005.5249214866	4122440.3477364695	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250	3493144.078394999846250000	3491310.1327026924	4937458.140118758
+NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	1633.60810810806667	5695.483082135364	5696.4103077145055	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360	1956.576923076922966667	6821.495748565159	6822.606289190924

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
index 59b80f2..35b7e87 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
@@ -29,13 +29,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--13326.0	528534767	true	1969-12-31 15:59:46.674	-13326	528534767	1	-13
--15813.0	528534767	true	1969-12-31 15:59:55.787	-15813	528534767	1	-4
--9566.0	528534767	true	1969-12-31 15:59:44.187	-9566	528534767	1	-16
-15007.0	528534767	true	1969-12-31 15:59:50.434	15007	528534767	1	-10
-7021.0	528534767	true	1969-12-31 16:00:15.007	7021	528534767	1	15
-4963.0	528534767	true	1969-12-31 16:00:07.021	4963	528534767	1	7
--7824.0	528534767	true	1969-12-31 16:00:04.963	-7824	528534767	1	5
--15431.0	528534767	true	1969-12-31 15:59:52.176	-15431	528534767	1	-8
--15549.0	528534767	true	1969-12-31 15:59:44.569	-15549	528534767	1	-15
-5780.0	528534767	true	1969-12-31 15:59:44.451	5780	528534767	1	-16
+-13326.0	528534767	true	1969-12-31 15:59:46.674	-13326.0000000000	528534767.00000000000000	1.00	-13
+-15813.0	528534767	true	1969-12-31 15:59:55.787	-15813.0000000000	528534767.00000000000000	1.00	-4
+-9566.0	528534767	true	1969-12-31 15:59:44.187	-9566.0000000000	528534767.00000000000000	1.00	-16
+15007.0	528534767	true	1969-12-31 15:59:50.434	15007.0000000000	528534767.00000000000000	1.00	-10
+7021.0	528534767	true	1969-12-31 16:00:15.007	7021.0000000000	528534767.00000000000000	1.00	15
+4963.0	528534767	true	1969-12-31 16:00:07.021	4963.0000000000	528534767.00000000000000	1.00	7
+-7824.0	528534767	true	1969-12-31 16:00:04.963	-7824.0000000000	528534767.00000000000000	1.00	5
+-15431.0	528534767	true	1969-12-31 15:59:52.176	-15431.0000000000	528534767.00000000000000	1.00	-8
+-15549.0	528534767	true	1969-12-31 15:59:44.569	-15549.0000000000	528534767.00000000000000	1.00	-15
+5780.0	528534767	true	1969-12-31 15:59:44.451	5780.0000000000	528534767.00000000000000	1.00	-16

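The vector_decimal_cast hunk above shows the same rule applied to CAST results: each cast value is rendered at the full scale of the target decimal type (the output widths suggest scales of 10, 14, and 2 for the three padded columns, though the SELECT list is truncated in the hunk header). A hedged one-liner, assuming a decimal(20,10) target:

  -- Sketch; the target type is an assumption inferred from the 10-digit scale above.
  SELECT CAST(cdouble AS decimal(20,10)) FROM alltypesorc LIMIT 1;
  -- old display: -13326    new display: -13326.0000000000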
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
index 366d883..08c3ae9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
@@ -84,13 +84,13 @@ LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
-1836.441995841977	-1166.027234927254	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
-1856.1322245322462	-1178.5293139292924	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
-1858.7575883576155	-1180.196257796231	0.837241711366943	251986.76756757565	5.772972973	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
-1862.6956340956693	-1182.6966735966386	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
-1883.6985446985233	-1196.0322245322466	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
-1886.3239085238924	-1197.6991683991848	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
-1887.636590436577	-1198.532640332654	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
-1895.5126819126846	-1203.5334719334692	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
-1909.9521829522155	-1212.701663201631	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
-1913.8902286902692	-1215.2020790020384	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459
+1836.44199584197700	-1166.02723492725400	0.8372697814833714	245972.55810810256	5.6189189189	835	1000	-24	835	true	1000.823076923077	835.6189	1000.823076923077	1969-12-31 16:13:55.618918918
+1856.13222453224620	-1178.52931392929240	0.8372449787014038	251275.4432432497	4.5783783784	844	1011	-13	844	true	1011.5538461538462	844.57837	1011.5538461538462	1969-12-31 16:14:04.578378378
+1858.75758835761550	-1180.19625779623100	0.837241711366943	251986.76756757565	5.7729729730	845	1012	-12	845	true	1012.9846153846155	845.77295	1012.9846153846155	1969-12-31 16:14:05.772972973
+1862.69563409566930	-1182.69667359663860	0.8372368276344616	253055.6391891997	7.5648648649	847	1015	-9	847	true	1015.1307692307693	847.5649	1015.1307692307693	1969-12-31 16:14:07.564864864
+1883.69854469852330	-1196.03222453224660	0.8372111259286499	258794.49324323673	7.1216216216	857	1026	2	857	true	1026.5769230769233	857.12164	1026.5769230769233	1969-12-31 16:14:17.121621621
+1886.32390852389240	-1197.69916839918480	0.8372079534581902	259516.37432431948	8.3162162162	858	1028	4	858	true	1028.0076923076924	858.3162	1028.0076923076924	1969-12-31 16:14:18.316216216
+1887.63659043657700	-1198.53264033265400	0.8372063705322131	259877.69189188787	8.9135135135	858	1028	4	858	true	1028.723076923077	858.9135	1028.723076923077	1969-12-31 16:14:18.913513513
+1895.51268191268460	-1203.53347193346920	0.8371969190171343	262050.87567567648	2.4972972973	862	1033	9	862	true	1033.0153846153846	862.4973	1033.0153846153846	1969-12-31 16:14:22.497297297
+1909.95218295221550	-1212.70166320163100	0.8371797936946236	266058.54729730723	9.0675675676	869	1040	16	869	true	1040.8846153846155	869.06757	1040.8846153846155	1969-12-31 16:14:29.067567567
+1913.89022869026920	-1215.20207900203840	0.8371751679995797	267156.8270270395	0.8594594595	870	1043	19	870	true	1043.0307692307692	870.85944	1043.0307692307692	1969-12-31 16:14:30.859459459

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
index 240b875..3712549 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
@@ -156,109 +156,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9


[30/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by jx...@apache.org.
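As the subject says, HIVE-12063 pads decimal output with trailing zeros out to the declared scale of the column, which is why every expected line in these vector_decimal_*.q.out diffs gains zeros while the underlying values are unchanged. A minimal sketch of the rule on a hypothetical table (names and values are illustrative, not from the patch):

  -- Hypothetical demo table; decimal(10,4) means every value prints 4 fractional digits.
  CREATE TABLE pad_demo (d decimal(10,4));
  INSERT INTO pad_demo VALUES (1.12), (-4400), (0);
  SELECT d FROM pad_demo;
  -- old display: 1.12, -4400, 0
  -- new display: 1.1200, -4400.0000, 0.0000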
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
index 8b6614e..0b14304 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
@@ -99,13 +99,13 @@ NULL
 NULL
 NULL
 NULL
-0
-0
-0
-0
-0
-0.123456789
-0.123456789
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1234567890
+0.1234567890
 1.2345678901
 1.2345678901
 1.2345678901
@@ -122,14 +122,14 @@ NULL
 12345.6789012346
 123456.7890123456
 123456.7890123457
-1234567.890123456
+1234567.8901234560
 1234567.8901234568
-12345678.90123456
+12345678.9012345600
 12345678.9012345679
-123456789.0123456
+123456789.0123456000
 123456789.0123456789
-1234567890.123456
-1234567890.123456789
+1234567890.1234560000
+1234567890.1234567890
 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -182,13 +182,13 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0.123456789	1.123456789	-0.876543211
-0.123456789	1.123456789	-0.876543211
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.1234567890	1.1234567890	-0.8765432110
+0.1234567890	1.1234567890	-0.8765432110
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
@@ -205,14 +205,14 @@ NULL	NULL	NULL
 12345.6789012346	12346.6789012346	12344.6789012346
 123456.7890123456	123457.7890123456	123455.7890123456
 123456.7890123457	123457.7890123457	123455.7890123457
-1234567.890123456	1234568.890123456	1234566.890123456
+1234567.8901234560	1234568.8901234560	1234566.8901234560
 1234567.8901234568	1234568.8901234568	1234566.8901234568
-12345678.90123456	12345679.90123456	12345677.90123456
+12345678.9012345600	12345679.9012345600	12345677.9012345600
 12345678.9012345679	12345679.9012345679	12345677.9012345679
-123456789.0123456	123456790.0123456	123456788.0123456
+123456789.0123456000	123456790.0123456000	123456788.0123456000
 123456789.0123456789	123456790.0123456789	123456788.0123456789
-1234567890.123456	1234567891.123456	1234567889.123456
-1234567890.123456789	1234567891.123456789	1234567889.123456789
+1234567890.1234560000	1234567891.1234560000	1234567889.1234560000
+1234567890.1234567890	1234567891.1234567890	1234567889.1234567890
 PREHOOK: query: SELECT dec, dec * 2, dec / 3  FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -265,37 +265,37 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0.123456789	0.246913578	0.041152263
-0.123456789	0.246913578	0.041152263
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.1234567890	0.2469135780	0.041152263000
+0.1234567890	0.2469135780	0.041152263000
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
 12345.6789012346	24691.3578024692	4115.226300411533
 12345.6789012346	24691.3578024692	4115.226300411533
-123456.7890123456	246913.5780246912	41152.2630041152
+123456.7890123456	246913.5780246912	41152.263004115200
 123456.7890123457	246913.5780246914	41152.263004115233
-1234567.890123456	2469135.780246912	411522.630041152
+1234567.8901234560	2469135.7802469120	411522.630041152000
 1234567.8901234568	2469135.7802469136	411522.630041152267
-12345678.90123456	24691357.80246912	4115226.30041152
+12345678.9012345600	24691357.8024691200	4115226.300411520000
 12345678.9012345679	24691357.8024691358	4115226.300411522633
-123456789.0123456	246913578.0246912	41152263.0041152
-123456789.0123456789	246913578.0246913578	41152263.0041152263
-1234567890.123456	2469135780.246912	411522630.041152
-1234567890.123456789	2469135780.246913578	411522630.041152263
+123456789.0123456000	246913578.0246912000	41152263.004115200000
+123456789.0123456789	246913578.0246913578	41152263.004115226300
+1234567890.1234560000	2469135780.2469120000	411522630.041152000000
+1234567890.1234567890	2469135780.2469135780	411522630.041152263000
 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -348,13 +348,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.013717421
-0.123456789	0.013717421
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.1234567890	0.013717421000
+0.1234567890	0.013717421000
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
@@ -371,14 +371,14 @@ NULL	NULL
 12345.6789012346	1371.742100137178
 123456.7890123456	13717.421001371733
 123456.7890123457	13717.421001371744
-1234567.890123456	137174.210013717333
+1234567.8901234560	137174.210013717333
 1234567.8901234568	137174.210013717422
-12345678.90123456	1371742.100137173333
+12345678.9012345600	1371742.100137173333
 12345678.9012345679	1371742.100137174211
-123456789.0123456	13717421.001371733333
-123456789.0123456789	13717421.0013717421
-1234567890.123456	137174210.013717333333
-1234567890.123456789	137174210.013717421
+123456789.0123456000	13717421.001371733333
+123456789.0123456789	13717421.001371742100
+1234567890.1234560000	137174210.013717333333
+1234567890.1234567890	137174210.013717421000
 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -431,13 +431,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.0045724736667
-0.123456789	0.0045724736667
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.1234567890	0.0045724736667
+0.1234567890	0.0045724736667
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
@@ -454,14 +454,14 @@ NULL	NULL
 12345.6789012346	457.2473667123926
 123456.7890123456	4572.4736671239111
 123456.7890123457	4572.4736671239148
-1234567.890123456	45724.7366712391111
+1234567.8901234560	45724.7366712391111
 1234567.8901234568	45724.7366712391407
-12345678.90123456	457247.3667123911111
+12345678.9012345600	457247.3667123911111
 12345678.9012345679	457247.3667123914037
-123456789.0123456	4572473.6671239111111
+123456789.0123456000	4572473.6671239111111
 123456789.0123456789	4572473.6671239140333
-1234567890.123456	45724736.6712391111111
-1234567890.123456789	45724736.6712391403333
+1234567890.1234560000	45724736.6712391111111
+1234567890.1234567890	45724736.6712391403333
 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -514,13 +514,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.015241578750190521
-0.123456789	0.015241578750190521
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.1234567890	0.01524157875019052100
+0.1234567890	0.01524157875019052100
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
@@ -537,14 +537,14 @@ NULL	NULL
 12345.6789012346	152415787.53238916034140423716
 123456.7890123456	15241578753.23881726870921383936
 123456.7890123457	15241578753.23884196006701630849
-1234567.890123456	1524157875323.881726870921383936
+1234567.8901234560	1524157875323.88172687092138393600
 1234567.8901234568	1524157875323.88370217954558146624
-12345678.90123456	152415787532388.1726870921383936
+12345678.9012345600	152415787532388.17268709213839360000
 12345678.9012345679	152415787532388.36774881877789971041
-123456789.0123456	15241578753238817.26870921383936
+123456789.0123456000	15241578753238817.26870921383936000000
 123456789.0123456789	15241578753238836.75019051998750190521
-1234567890.123456	NULL
-1234567890.123456789	NULL
+1234567890.1234560000	NULL
+1234567890.1234567890	NULL
 PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
@@ -643,7 +643,7 @@ POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_precision
 #### A masked pattern was here ####
-12345678901234567890.12345678
+12345678901234567890.123456780000000000
 PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
index edde023..8336999 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
@@ -25,7 +25,7 @@ POSTHOOK: query: select * from decimal_tbl_1_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555
+55555.000000000000000000
 PREHOOK: query: -- EXPLAIN
 -- SELECT dec, round(null), round(null, 0), round(125, null), 
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
@@ -121,7 +121,7 @@ FROM decimal_tbl_1_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_1_orc
 #### A masked pattern was here ####
-55555	55555	55555	55555	55555	55560	55600	56000	60000	100000	0	0	0
+55555	55555	55555.0	55555.00	55555.000	55560	55600	56000	60000	100000	0	0	0
 PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -150,7 +150,7 @@ POSTHOOK: query: select * from decimal_tbl_2_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125.315	-125.315
+125.315000000000000000	-125.315000000000000000
 PREHOOK: query: EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
@@ -240,7 +240,7 @@ FROM decimal_tbl_2_orc ORDER BY p
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
-125	125	125.3	125.32	125.315	125.315	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.315	-130	-100	0	0
+125	125	125.3	125.32	125.315	125.3150	130	100	0	0	-125	-125	-125.3	-125.32	-125.315	-125.3150	-130	-100	0	0
 PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -268,7 +268,7 @@ POSTHOOK: query: select * from decimal_tbl_3_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-3.141592653589793
+3.141592653589793000
 PREHOOK: query: EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
@@ -402,7 +402,7 @@ FROM decimal_tbl_3_orc ORDER BY d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
-0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.14159265359	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.141592653589793
+0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	3	3.1	3.14	3.142	3.1416	3.14159	3.141593	3.1415927	3.14159265	3.141592654	3.1415926536	3.14159265359	3.141592653590	3.1415926535898	3.1415926535898	3.14159265358979	3.141592653589793	3.1415926535897930
 PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) 
 STORED AS ORC
 PREHOOK: type: CREATETABLE
@@ -431,7 +431,7 @@ POSTHOOK: query: select * from decimal_tbl_4_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_4_orc
 #### A masked pattern was here ####
-1809242.3151111344	-1809242.3151111344
+1809242.315111134400000000	-1809242.315111134400000000
 PREHOOK: query: EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p

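The vector_decimal_round_2 hunks show how the padding interacts with ROUND: ROUND(x, n) yields a decimal of scale n, so the result now always prints exactly n fractional digits (125.3150 for n = 4 above, 3.141592653590 for n = 12). A hedged sketch using the BD decimal-literal suffix the test itself uses:

  -- Sketch; the value is taken from decimal_tbl_2_orc above (pos = 125.315).
  SELECT round(125.315BD, 4);
  -- old display: 125.315    new display: 125.3150 (result scale is 4, so 4 digits print)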
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
index ffdb1c9..7dea1a2 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out
@@ -73,16 +73,16 @@ POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_trailing
 #### A masked pattern was here ####
-0	0	0
-1	0	0
+0	0.0000	0.00000000
+1	0.0000	0.00000000
 2	NULL	NULL
-3	1	1
-4	10	10
-5	100	100
-6	1000	1000
-7	10000	10000
-8	100000	100000
-9	NULL	1000000
+3	1.0000	1.00000000
+4	10.0000	10.00000000
+5	100.0000	100.00000000
+6	1000.0000	1000.00000000
+7	10000.0000	10000.00000000
+8	100000.0000	100000.00000000
+9	NULL	1000000.00000000
 10	NULL	NULL
 11	NULL	NULL
 12	NULL	NULL
@@ -91,18 +91,18 @@ POSTHOOK: Input: default@decimal_trailing
 15	NULL	NULL
 16	NULL	NULL
 17	NULL	NULL
-18	1	1
-19	10	10
-20	100	100
-21	1000	1000
-22	100000	10000
-23	0	0
-24	0	0
-25	0	0
-26	0	0
-27	0	0
-28	12313.2	134134.312525
-29	99999.999	134134.31242553
+18	1.0000	1.00000000
+19	10.0000	10.00000000
+20	100.0000	100.00000000
+21	1000.0000	1000.00000000
+22	100000.0000	10000.00000000
+23	0.0000	0.00000000
+24	0.0000	0.00000000
+25	0.0000	0.00000000
+26	0.0000	0.00000000
+27	0.0000	0.00000000
+28	12313.2000	134134.31252500
+29	99999.9990	134134.31242553
 PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_trailing_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
index cc22a56..6df956d 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
@@ -95,44 +95,44 @@ POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-200
-20
-2
-0.2
-0.02
-400
-40
-4
-0
-0.4
-0.04
-0.6
-0.66
-0.666
--0.6
--0.66
--0.666
-2
-4
-6.28
--2.24
--2.24
--2.244
-2.24
-2.244
-248
-250.4
--2510.98
-6.28
-6.28
-6.28
-2
--2469135780.246913578
-2469135780.24691356
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.2000000000
+0.0200000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.4000000000
+0.0400000000
+0.6000000000
+0.6600000000
+0.6660000000
+-0.6000000000
+-0.6600000000
+-0.6660000000
+2.0000000000
+4.0000000000
+6.2800000000
+-2.2400000000
+-2.2400000000
+-2.2440000000
+2.2400000000
+2.2440000000
+248.0000000000
+250.4000000000
+-2510.9800000000
+6.2800000000
+6.2800000000
+6.2800000000
+2.0000000000
+-2469135780.2469135780
+2469135780.2469135600
 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
@@ -178,44 +178,44 @@ POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-200
-20
-2
-0.1
-0.01
-400
-40
-4
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-2
-4
-6.14
--2.12
--2.12
--12.122
-2.12
-2.122
-248
-250.2
--2510.49
-6.14
-6.14
-7.14
-2
--2469135780.123456789
-2469135780.12345678
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.1000000000
+0.0100000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+2.0000000000
+4.0000000000
+6.1400000000
+-2.1200000000
+-2.1200000000
+-12.1220000000
+2.1200000000
+2.1220000000
+248.0000000000
+250.2000000000
+-2510.4900000000
+6.1400000000
+6.1400000000
+7.1400000000
+2.0000000000
+-2469135780.1234567890
+2469135780.1234567800
 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
@@ -429,44 +429,44 @@ POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
@@ -512,44 +512,44 @@ POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-0
-0
-0
-0.1
-0.01
-0
-0
-0
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-0
-0
-0.14
--0.12
--0.12
-9.878
-0.12
-0.122
-0
-0.2
--0.49
-0.14
-0.14
--0.86
-0
--0.123456789
-0.12345678
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1000000000
+0.0100000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.0000000000
+0.0000000000
+0.1400000000
+-0.1200000000
+-0.1200000000
+9.8780000000
+0.1200000000
+0.1220000000
+0.0000000000
+0.2000000000
+-0.4900000000
+0.1400000000
+0.1400000000
+-0.8600000000
+0.0000000000
+-0.1234567890
+0.1234567800
 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
@@ -763,42 +763,42 @@ POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-19360000
+19360000.00000000000000000000
 NULL
-0
-0
-10000
-100
-1
-0.01
-0.0001
-40000
-400
-4
-0
-0.04
-0.0004
-0.09
-0.1089
-0.110889
-0.09
-0.1089
-0.110889
-1
-4
-9.8596
-1.2544
-1.2544
-1.258884
-1.2544
-1.258884
-15376
-15675.04
-1576255.1401
-9.8596
-9.8596
-9.8596
-1
+0.00000000000000000000
+0.00000000000000000000
+10000.00000000000000000000
+100.00000000000000000000
+1.00000000000000000000
+0.01000000000000000000
+0.00010000000000000000
+40000.00000000000000000000
+400.00000000000000000000
+4.00000000000000000000
+0.00000000000000000000
+0.04000000000000000000
+0.00040000000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+1.00000000000000000000
+4.00000000000000000000
+9.85960000000000000000
+1.25440000000000000000
+1.25440000000000000000
+1.25888400000000000000
+1.25440000000000000000
+1.25888400000000000000
+15376.00000000000000000000
+15675.04000000000000000000
+1576255.14010000000000000000
+9.85960000000000000000
+9.85960000000000000000
+9.85960000000000000000
+1.00000000000000000000
 NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
@@ -849,29 +849,29 @@ POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-100	100
-10	10
-1	1
-200	200
-20	20
-2	2
-1	1
-2	2
-3.14	3
--1.12	-1
--1.12	-1
--1.122	-11
-1.12	1
-1.122	1
-124	124
-125.2	125
--1255.49	-1255
-3.14	3
-3.14	3
-3.14	4
-1	1
--1234567890.123456789	-1234567890
-1234567890.12345678	1234567890
+100.0000000000	100
+10.0000000000	10
+1.0000000000	1
+200.0000000000	200
+20.0000000000	20
+2.0000000000	2
+1.0000000000	1
+2.0000000000	2
+3.1400000000	3
+-1.1200000000	-1
+-1.1200000000	-1
+-1.1220000000	-11
+1.1200000000	1
+1.1220000000	1
+124.0000000000	124
+125.2000000000	125
+-1255.4900000000	-1255
+3.1400000000	3
+3.1400000000	3
+3.1400000000	4
+1.0000000000	1
+-1234567890.1234567890	-1234567890
+1234567890.1234567800	1234567890
 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
@@ -917,44 +917,44 @@ POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--19360000
+-19360000.0000000000
 NULL
-0
-0
-10000
-100
-1
-0
-0
-40000
-400
-4
-0
-0
-0
-0
-0
-0
-0
-0
-0
-1
-4
-9.42
-1.12
-1.12
-12.342
-1.12
-1.122
-15376
-15650
-1575639.95
-9.42
-9.42
-12.56
-1
-1524157875171467887.50190521
-1524157875171467876.3907942
+0.0000000000
+0.0000000000
+10000.0000000000
+100.0000000000
+1.0000000000
+0.0000000000
+0.0000000000
+40000.0000000000
+400.0000000000
+4.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+1.0000000000
+4.0000000000
+9.4200000000
+1.1200000000
+1.1200000000
+12.3420000000
+1.1200000000
+1.1220000000
+15376.0000000000
+15650.0000000000
+1575639.9500000000
+9.4200000000
+9.4200000000
+12.5600000000
+1.0000000000
+1524157875171467887.5019052100
+1524157875171467876.3907942000
 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
@@ -1268,40 +1268,40 @@ POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
 PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
@@ -1350,30 +1350,30 @@ POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1
-1
-1
-1
-1
-1
-1
-1
-1
+-1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
 1.046666666666666666667
-1.12
-1.12
-0.102
-1.12
-1.122
-1
-1.0016
+1.120000000000000000000
+1.120000000000000000000
+0.102000000000000000000
+1.120000000000000000000
+1.122000000000000000000
+1.000000000000000000000
+1.001600000000000000000
 1.000390438247011952191
 1.046666666666666666667
 1.046666666666666666667
-0.785
-1
-1.0000000001
-1.00000000009999999271
+0.785000000000000000000
+1.000000000000000000000
+1.000000000100000000000
+1.000000000099999992710
 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
@@ -1576,44 +1576,44 @@ POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
-0.3
-0.33
-0.333
-1
-2
-3.14
-1.12
-1.12
-1.122
-1.12
-1.122
-124
-125.2
-1255.49
-3.14
-3.14
-3.14
-1
-1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- avg
 EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
@@ -1700,23 +1700,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789	-1234567890.123456789	-1234567890.123456789
--1255	-1255.49	-1255.49	-1255.49
--11	-1.122	-1.122	-1.122
--1	-1.12	-1.12	-2.24
-0	0.02538461538461538461538	0.02538461538462	0.33
-1	1.0484	1.0484	5.242
-2	2	2	4
-3	3.14	3.14	9.42
-4	3.14	3.14	3.14
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125	125.2	125.2	125.2
-200	200	200	200
-4400	-4400	-4400	-4400
-1234567890	1234567890.12345678	1234567890.12345678	1234567890.12345678
+-1234567890	-1234567890.12345678900000000000000	-1234567890.12345678900000	-1234567890.1234567890
+-1255	-1255.49000000000000000000000	-1255.49000000000000	-1255.4900000000
+-11	-1.12200000000000000000000	-1.12200000000000	-1.1220000000
+-1	-1.12000000000000000000000	-1.12000000000000	-2.2400000000
+0	0.02538461538461538461538	0.02538461538462	0.3300000000
+1	1.04840000000000000000000	1.04840000000000	5.2420000000
+2	2.00000000000000000000000	2.00000000000000	4.0000000000
+3	3.14000000000000000000000	3.14000000000000	9.4200000000
+4	3.14000000000000000000000	3.14000000000000	3.1400000000
+10	10.00000000000000000000000	10.00000000000000	10.0000000000
+20	20.00000000000000000000000	20.00000000000000	20.0000000000
+100	100.00000000000000000000000	100.00000000000000	100.0000000000
+124	124.00000000000000000000000	124.00000000000000	124.0000000000
+125	125.20000000000000000000000	125.20000000000000	125.2000000000
+200	200.00000000000000000000000	200.00000000000000	200.0000000000
+4400	-4400.00000000000000000000000	-4400.00000000000000	-4400.0000000000
+1234567890	1234567890.12345678000000000000000	1234567890.12345678000000	1234567890.1234567800
 PREHOOK: query: -- negative
 EXPLAIN SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1764,44 +1764,44 @@ POSTHOOK: query: SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
--100
--10
--1
--0.1
--0.01
--200
--20
--2
-0
--0.2
--0.02
--0.3
--0.33
--0.333
-0.3
-0.33
-0.333
--1
--2
--3.14
-1.12
-1.12
-1.122
--1.12
--1.122
--124
--125.2
-1255.49
--3.14
--3.14
--3.14
--1
-1234567890.123456789
--1234567890.12345678
+0.0000000000
+0.0000000000
+-100.0000000000
+-10.0000000000
+-1.0000000000
+-0.1000000000
+-0.0100000000
+-200.0000000000
+-20.0000000000
+-2.0000000000
+0.0000000000
+-0.2000000000
+-0.0200000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+-1.0000000000
+-2.0000000000
+-3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+-1.1200000000
+-1.1220000000
+-124.0000000000
+-125.2000000000
+1255.4900000000
+-3.1400000000
+-3.1400000000
+-3.1400000000
+-1.0000000000
+1234567890.1234567890
+-1234567890.1234567800
 PREHOOK: query: -- positive
 EXPLAIN SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1831,44 +1831,44 @@ POSTHOOK: query: SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-1
-2
-3.14
--1.12
--1.12
--1.122
-1.12
-1.122
-124
-125.2
--1255.49
-3.14
-3.14
-3.14
-1
--1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+-1.1200000000
+-1.1200000000
+-1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+-1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+-1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- ceiling
 EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2086,42 +2086,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.00
 NULL
-0
-0
-100
-10
-1
-0.1
+0.00
+0.00
+100.00
+10.00
+1.00
+0.10
 0.01
-200
-20
-2
-0
-0.2
+200.00
+20.00
+2.00
+0.00
+0.20
 0.02
-0.3
+0.30
 0.33
 0.33
--0.3
+-0.30
 -0.33
 -0.33
-1
-2
+1.00
+2.00
 3.14
 -1.12
 -1.12
 -1.12
 1.12
 1.12
-124
-125.2
+124.00
+125.20
 -1255.49
 3.14
 3.14
 3.14
-1
+1.00
 -1234567890.12
 1234567890.12
 PREHOOK: query: -- power
@@ -2255,44 +2255,44 @@ POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--2199
+-2199.000000000000
 NULL
 NULL
 NULL
-1
-1
-0
-0
-0
-1
-1
-0
+1.000000000000
+1.000000000000
+0.000000000000
+0.000000000000
+0.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
 NULL
-0
-0
-0.1
-0.01
-0.001
-0.1
-0.01
-0.001
-0
-0
-1
--0.12
--0.12
--0.122
-0.44
-0.439
-1
-1
--626.745
-1
-1
-1
-0
--617283944.0617283945
-1
+0.000000000000
+0.000000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.000000000000
+0.000000000000
+1.000000000000
+-0.120000000000
+-0.120000000000
+-0.122000000000
+0.440000000000
+0.439000000000
+1.000000000000
+1.000000000000
+-626.745000000000
+1.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
+-617283944.061728394500
+1.000000000000
 PREHOOK: query: -- stddev, var
 EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value
 PREHOOK: type: QUERY
@@ -2596,7 +2596,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890.123456789
+-1234567890.1234567890
 PREHOOK: query: -- max
 EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2663,7 +2663,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1234567890.12345678
+1234567890.1234567800
 PREHOOK: query: -- count
 EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY

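The hunks above all reflect one behavioral change: decimal results are now
printed padded with trailing zeros out to the declared scale of the result
type, instead of in minimal form. A minimal sketch of that padding in plain
Java (assuming a declared scale of 10 for the key column, which the padded
output such as 4400.0000000000 implies; this is not Hive's internal code):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalPadSketch {
        public static void main(String[] args) {
            // Pad to the column's declared scale; UNNECESSARY is safe here
            // because increasing the scale never rounds.
            int columnScale = 10;
            BigDecimal raw = new BigDecimal("4400");
            BigDecimal padded = raw.setScale(columnScale, RoundingMode.UNNECESSARY);
            System.out.println(padded.toPlainString()); // 4400.0000000000
        }
    }
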
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
index 1cd5959..337d83f 100644
--- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
@@ -111,56 +111,56 @@ LIMIT 50
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_test
 #### A masked pattern was here ####
--1073051226	-7382.0	-4409.2486486486	-5280.969230769231	-4409.2486486486
--1072081801	8373.0	5001.1702702703	5989.915384615385	5001.1702702703
--1072076362	-5470.0	-3267.2162162162	-3913.1538461538466	-3267.2162162162
--1070883071	-741.0	-442.5972972973	-530.1	-442.5972972973
--1070551679	-947.0	-565.6405405405	-677.4692307692308	-565.6405405405
--1069512165	11417.0	6819.3432432432	8167.546153846154	6819.3432432432
--1069109166	8390.0	5011.3243243243	6002.076923076923	5011.3243243243
--1068623584	-14005.0	-8365.1486486486	-10018.961538461539	-8365.1486486486
--1067386090	-3977.0	-2375.4513513514	-2845.084615384616	-2375.4513513514
--1066922682	-9987.0	-5965.2081081081	-7144.546153846154	-5965.2081081081
--1066226047	-9439.0	-5637.8891891892	-6752.515384615385	-5637.8891891892
--1065117869	2538.0	1515.9405405405	1815.646153846154	1515.9405405405
--1064949302	6454.0	3854.9567567568	4617.092307692308	3854.9567567568
--1063498122	-11480.0	-6856.972972973	-8212.615384615387	-6856.972972973
--1062973443	10541.0	6296.1108108108	7540.869230769231	6296.1108108108
--1061614989	-4234.0	-2528.9567567568	-3028.938461538462	-2528.9567567568
--1061057428	-1085.0	-648.0675675676	-776.1923076923077	-648.0675675676
--1059941909	8782.0	5245.4648648649	6282.507692307693	5245.4648648649
--1059338191	7322.0	4373.4108108108	5238.046153846154	4373.4108108108
--1059047258	12452.0	7437.5459459459	8907.969230769231	7437.5459459459
--1056684111	13991.0	8356.7864864865	10008.946153846155	8356.7864864865
--1055945837	13690.0	8177	9793.615384615387	8177
--1055669248	2570.0	1535.0540540541	1838.538461538462	1535.0540540541
--1055316250	-14990.0	-8953.4864864865	-10723.615384615385	-8953.4864864865
--1053385587	14504.0	8663.2	10375.938461538462	8663.2
--1053238077	-3704.0	-2212.3891891892	-2649.784615384616	-2212.3891891892
--1052745800	-12404.0	-7408.8756756757	-8873.630769230771	-7408.8756756757
--1052322972	-7433.0	-4439.7108108108	-5317.453846153847	-4439.7108108108
--1050684541	-8261.0	-4934.272972973	-5909.792307692308	-4934.272972973
--1050657303	-6999.0	-4180.4837837838	-5006.976923076923	-4180.4837837838
--1050165799	8634.0	5157.0648648649	6176.63076923077	5157.0648648649
+-1073051226	-7382.0	-4409.2486486486	-5280.96923076923100	-4409.2486486486
+-1072081801	8373.0	5001.1702702703	5989.91538461538500	5001.1702702703
+-1072076362	-5470.0	-3267.2162162162	-3913.15384615384660	-3267.2162162162
+-1070883071	-741.0	-442.5972972973	-530.10000000000000	-442.5972972973
+-1070551679	-947.0	-565.6405405405	-677.46923076923080	-565.6405405405
+-1069512165	11417.0	6819.3432432432	8167.54615384615400	6819.3432432432
+-1069109166	8390.0	5011.3243243243	6002.07692307692300	5011.3243243243
+-1068623584	-14005.0	-8365.1486486486	-10018.96153846153900	-8365.1486486486
+-1067386090	-3977.0	-2375.4513513514	-2845.08461538461600	-2375.4513513514
+-1066922682	-9987.0	-5965.2081081081	-7144.54615384615400	-5965.2081081081
+-1066226047	-9439.0	-5637.8891891892	-6752.51538461538500	-5637.8891891892
+-1065117869	2538.0	1515.9405405405	1815.64615384615400	1515.9405405405
+-1064949302	6454.0	3854.9567567568	4617.09230769230800	3854.9567567568
+-1063498122	-11480.0	-6856.9729729730	-8212.61538461538700	-6856.9729729730
+-1062973443	10541.0	6296.1108108108	7540.86923076923100	6296.1108108108
+-1061614989	-4234.0	-2528.9567567568	-3028.93846153846200	-2528.9567567568
+-1061057428	-1085.0	-648.0675675676	-776.19230769230770	-648.0675675676
+-1059941909	8782.0	5245.4648648649	6282.50769230769300	5245.4648648649
+-1059338191	7322.0	4373.4108108108	5238.04615384615400	4373.4108108108
+-1059047258	12452.0	7437.5459459459	8907.96923076923100	7437.5459459459
+-1056684111	13991.0	8356.7864864865	10008.94615384615500	8356.7864864865
+-1055945837	13690.0	8177.0000000000	9793.61538461538700	8177.0000000000
+-1055669248	2570.0	1535.0540540541	1838.53846153846200	1535.0540540541
+-1055316250	-14990.0	-8953.4864864865	-10723.61538461538500	-8953.4864864865
+-1053385587	14504.0	8663.2000000000	10375.93846153846200	8663.2000000000
+-1053238077	-3704.0	-2212.3891891892	-2649.78461538461600	-2212.3891891892
+-1052745800	-12404.0	-7408.8756756757	-8873.63076923077100	-7408.8756756757
+-1052322972	-7433.0	-4439.7108108108	-5317.45384615384700	-4439.7108108108
+-1050684541	-8261.0	-4934.2729729730	-5909.79230769230800	-4934.2729729730
+-1050657303	-6999.0	-4180.4837837838	-5006.97692307692300	-4180.4837837838
+-1050165799	8634.0	5157.0648648649	6176.63076923077000	5157.0648648649
 -1048934049	-524.0	-312.9837837838	-374.86153846153854	-312.9837837838
--1046399794	4130.0	2466.8378378378	2954.5384615384614	2466.8378378378
--1045867222	-8034.0	-4798.6864864865	-5747.400000000001	-4798.6864864865
--1045196363	-5039.0	-3009.7810810811	-3604.823076923077	-3009.7810810811
--1045181724	-5706.0	-3408.1783783784	-4081.9846153846156	-3408.1783783784
--1045087657	-5865.0	-3503.1486486486	-4195.7307692307695	-3503.1486486486
--1044207190	5381.0	3214.0567567568	3849.4846153846156	3214.0567567568
--1044093617	-3422.0	-2043.9513513514	-2448.046153846154	-2043.9513513514
--1043573508	16216.0	9685.772972973	11600.676923076924	9685.772972973
--1043132597	12302.0	7347.9513513514	8800.66153846154	7347.9513513514
--1043082182	9180.0	5483.1891891892	6567.2307692307695	5483.1891891892
--1042805968	5133.0	3065.927027027	3672.0692307692307	3065.927027027
--1042712895	9296.0	5552.4756756757	6650.215384615385	5552.4756756757
--1042396242	9583.0	5723.9	6855.53076923077	5723.9
--1041734429	-836.0	-499.3405405405	-598.0615384615385	-499.3405405405
--1041391389	-12970.0	-7746.9459459459	-9278.538461538463	-7746.9459459459
--1041252354	756.0	451.5567567568	540.8307692307692	451.5567567568
--1039776293	13704.0	8185.3621621622	9803.630769230771	8185.3621621622
--1039762548	-3802.0	-2270.9243243243	-2719.8923076923083	-2270.9243243243
+-1046399794	4130.0	2466.8378378378	2954.53846153846140	2466.8378378378
+-1045867222	-8034.0	-4798.6864864865	-5747.40000000000100	-4798.6864864865
+-1045196363	-5039.0	-3009.7810810811	-3604.82307692307700	-3009.7810810811
+-1045181724	-5706.0	-3408.1783783784	-4081.98461538461560	-3408.1783783784
+-1045087657	-5865.0	-3503.1486486486	-4195.73076923076950	-3503.1486486486
+-1044207190	5381.0	3214.0567567568	3849.48461538461560	3214.0567567568
+-1044093617	-3422.0	-2043.9513513514	-2448.04615384615400	-2043.9513513514
+-1043573508	16216.0	9685.7729729730	11600.67692307692400	9685.7729729730
+-1043132597	12302.0	7347.9513513514	8800.66153846154000	7347.9513513514
+-1043082182	9180.0	5483.1891891892	6567.23076923076950	5483.1891891892
+-1042805968	5133.0	3065.9270270270	3672.06923076923070	3065.9270270270
+-1042712895	9296.0	5552.4756756757	6650.21538461538500	5552.4756756757
+-1042396242	9583.0	5723.9000000000	6855.53076923077000	5723.9000000000
+-1041734429	-836.0	-499.3405405405	-598.06153846153850	-499.3405405405
+-1041391389	-12970.0	-7746.9459459459	-9278.53846153846300	-7746.9459459459
+-1041252354	756.0	451.5567567568	540.83076923076920	451.5567567568
+-1039776293	13704.0	8185.3621621622	9803.63076923077100	8185.3621621622
+-1039762548	-3802.0	-2270.9243243243	-2719.89230769230830	-2270.9243243243
 PREHOOK: query: SELECT sum(hash(*))
   FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
         WHERE cdecimal1 is not null and cdecimal2 is not null

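In the avg hunks above, the sum(key)/count(key) and avg(key) columns are typed
with a widened scale rather than the input scale, so they pick up more trailing
zeros than the plain sum column. A sketch of the same computation with an
explicit result scale (the scale 14 below is read off the avg(key) output for
the value=1 group, 1.04840000000000; Hive's actual precision and scale
derivation rules are not reproduced here):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    // value = 1 group in DECIMAL_UDF: keys 1, 1, 1, 1.12, 1.122
    BigDecimal sum = new BigDecimal("5.242");
    BigDecimal avg = sum.divide(BigDecimal.valueOf(5), 14, RoundingMode.HALF_UP);
    System.out.println(avg.toPlainString()); // 1.04840000000000
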
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/update_all_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/update_all_types.q.out b/ql/src/test/results/clientpositive/update_all_types.q.out
index 1cfa088..c5c1abb 100644
--- a/ql/src/test/results/clientpositive/update_all_types.q.out
+++ b/ql/src/test/results/clientpositive/update_all_types.q.out
@@ -96,11 +96,11 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-741	-1070883071	-1070883071	-1645852809	NULL	-741.0	NULL	NULL	1969-12-31	0ruyd6Y50JpdGRf6HqD	0ruyd6Y50JpdGRf6HqD	xH7445Rals48VOulSyR5F               	false
@@ -150,12 +150,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-51	NULL	-1071480828	-1071480828	-1401575336	-51.0	NULL	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false
@@ -184,12 +184,12 @@ POSTHOOK: query: select * from acid_uat order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_uat
 #### A masked pattern was here ####
--102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
+-102	-51	-1071480828	-1071480828	-1401575336	-51.0	-51.0	-51.00	1969-12-31 16:00:08.451	NULL	aw724t8c5558x2xneC624	aw724t8c5558x2xneC624	4uE7l74tESBiKfu7c8wM7GA             	true
 1	2	-1070883071	3	4	3.14	6.28	5.99	NULL	2014-09-01	its a beautiful day in the neighbhorhood	a beautiful day for a neighbor	wont you be mine                    	true
-11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
-11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
-11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
-8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
+11	NULL	-1069736047	-1069736047	-453772520	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	k17Am8uPHWk02cEf1jet	k17Am8uPHWk02cEf1jet	qrXLLNX1                            	true
+11	NULL	-1072910839	-1072910839	2048385991	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	0iqrc5	0iqrc5	KbaDXiN85adbHRx58v                  	false
+11	NULL	-1073279343	-1073279343	-1595604468	11.0	NULL	11.00	1969-12-31 16:00:02.351	NULL	oj1YrV5Wa	oj1YrV5Wa	P76636jJ6qM17d7DIy                  	true
+8	NULL	-1071363017	-1071363017	1349676361	8.0	NULL	8.00	1969-12-31 16:00:15.892	NULL	Anj0oF	Anj0oF	IwE1G7Qb0B1NEfV030g                 	true
 NULL	-5470	-1072076362	-1072076362	1864027286	NULL	-5470.0	NULL	NULL	1969-12-31	2uLyD28144vklju213J1mr	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P                   	true
 NULL	-7382	-1073051226	-1073051226	-1887561756	NULL	-7382.0	NULL	NULL	1969-12-31	A34p7oRr2WvUJNf	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d              	false
 NULL	-947	-1070551679	-1070551679	1864027286	NULL	-947.0	NULL	NULL	1969-12-31	iUR3Q	iUR3Q	4KWs6gw7lv2WYd66P                   	false

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
index 72dc004..e0cf903 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
@@ -164,4 +164,4 @@ select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2korc
 #### A masked pattern was here ####
--4997414117561.546875	4994550248722.298828	-10252745435816.02441	-5399023399.587163986308583465
+-4997414117561.546875000000000000	4994550248722.298828000000000000	-10252745435816.024410000000000000	-5399023399.587163986308583465

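Here min, max and sum gain padding to the column's scale (18, judging by the
new output), while avg was already printed at its full computed scale and is
unchanged. BigDecimal addition keeps the larger operand scale, so a sum over
scale-18 inputs naturally stays at scale 18; a small sketch using the min and
max values from the output above:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    BigDecimal min = new BigDecimal("-4997414117561.546875")
            .setScale(18, RoundingMode.UNNECESSARY);
    BigDecimal max = new BigDecimal("4994550248722.298828")
            .setScale(18, RoundingMode.UNNECESSARY);
    // Addition keeps the larger operand scale (18 on both sides here).
    System.out.println(min.add(max).toPlainString());
    // -2863868839.248047000000000000
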
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out
index a9b9a4b..b80da1b 100644
--- a/ql/src/test/results/clientpositive/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_in.q.out
@@ -594,34 +594,34 @@ POSTHOOK: Input: default@decimal_date_test
 -18.5162162162
 -17.3216216216
 -16.7243243243
--16.127027027
+-16.1270270270
 -15.5297297297
 -10.7513513514
 -9.5567567568
 -8.3621621622
--5.972972973
+-5.9729729730
 -3.5837837838
 4.1810810811
 4.7783783784
 4.7783783784
 5.3756756757
-5.972972973
-5.972972973
+5.9729729730
+5.9729729730
 11.3486486486
 11.3486486486
 11.9459459459
 14.9324324324
 19.1135135135
 20.3081081081
-22.1
+22.1000000000
 24.4891891892
 33.4486486486
 34.6432432432
 40.0189189189
 42.4081081081
 43.0054054054
-44.2
-44.2
+44.2000000000
+44.2000000000
 44.7972972973
 45.9918918919
 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
index 9edd6f1..e5d56ec 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
@@ -207,13 +207,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index 3ed833b..86f1677 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -153,7 +153,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education
@@ -239,7 +239,7 @@ POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 NULL	374	65560	4294967516	65.43	22.48	true	oscar quirinius	2013-03-01 09:11:58.703316	16.86	mathematics
 NULL	409	65536	4294967490	46.97	25.92	false	fred miller	2013-03-01 09:11:58.703116	33.45	history
-NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.8	mathematics
+NULL	473	65720	4294967324	80.74	40.6	false	holly falkner	2013-03-01 09:11:58.703111	18.80	mathematics
 -3	275	65622	4294967302	71.78	8.49	false	wendy robinson	2013-03-01 09:11:58.703294	95.39	undecided
 -3	344	65733	4294967363	0.56	11.96	true	rachel thompson	2013-03-01 09:11:58.703276	88.46	wind surfing
 -3	376	65548	4294967431	96.78	43.23	false	fred ellison	2013-03-01 09:11:58.703233	75.39	education

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_2.q.out
index 8a4d53a..ff82f38 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_2.q.out
@@ -1051,7 +1051,7 @@ POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1355944339.1234567
+1355944339.12345670
 PREHOOK: query: explain
 select cast(true as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1406,7 +1406,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1
+1.0000000000000000000
 PREHOOK: query: explain
 select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 PREHOOK: type: QUERY

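Both hunks above show the padding applied to casts: the timestamp cast is
printed at its target scale (8, inferred from the output), and the
decimal(20,19) cast, which rounds twenty nines up to 1, now shows all 19
fractional digits. A sketch of each (the epoch value is taken from the output
rather than recomputed):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    // Timestamp as decimal seconds, padded to the cast's target scale.
    BigDecimal ts = new BigDecimal("1355944339.1234567")
            .setScale(8, RoundingMode.UNNECESSARY);
    System.out.println(ts.toPlainString()); // 1355944339.12345670

    // 0.99999999999999999999 has 20 fractional digits; rounding to scale 19
    // carries it up to 1, and printing at scale 19 pads the zeros.
    BigDecimal c = new BigDecimal("0.99999999999999999999")
            .setScale(19, RoundingMode.HALF_UP);
    System.out.println(c.toPlainString()); // 1.0000000000000000000
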
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
index 75f872e..eea91bb 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
@@ -47,43 +47,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -92,43 +92,43 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-1234567890.12345678	1234567890
-200	200
-125.2	125
-124	124
-100	100
-20	20
-10	10
-3.14	4
-3.14	3
-3.14	3
-3.14	3
-2	2
-2	2
-1.122	1
-1.12	1
-1	1
-1	1
-1	1
-0.333	0
-0.33	0
-0.3	0
-0.2	0
-0.1	0
-0.02	0
-0.01	0
-0	0
-0	0
-0	0
--0.3	0
--0.33	0
--0.333	0
--1.12	-1
--1.12	-1
--1.122	-11
--1255.49	-1255
--4400	4400
--1234567890.123456789	-1234567890
+1234567890.123456780000000000	1234567890
+200.000000000000000000	200
+125.200000000000000000	125
+124.000000000000000000	124
+100.000000000000000000	100
+20.000000000000000000	20
+10.000000000000000000	10
+3.140000000000000000	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+2.000000000000000000	2
+2.000000000000000000	2
+1.122000000000000000	1
+1.120000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+0.333000000000000000	0
+0.330000000000000000	0
+0.300000000000000000	0
+0.200000000000000000	0
+0.100000000000000000	0
+0.020000000000000000	0
+0.010000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+-0.300000000000000000	0
+-0.330000000000000000	0
+-0.333000000000000000	0
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-1.122000000000000000	-11
+-1255.490000000000000000	-1255
+-4400.000000000000000000	4400
+-1234567890.123456789000000000	-1234567890
 NULL	0
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -139,43 +139,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -185,34 +185,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL
--1234567890.123456789
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
-1234567890.12345678
+-1234567890.123456789000000000
+-4400.000000000000000000
+-1255.490000000000000000
+-1.122000000000000000
+-1.120000000000000000
+-0.333000000000000000
+-0.330000000000000000
+-0.300000000000000000
+0.000000000000000000
+0.010000000000000000
+0.020000000000000000
+0.100000000000000000
+0.200000000000000000
+0.300000000000000000
+0.330000000000000000
+0.333000000000000000
+1.000000000000000000
+1.120000000000000000
+1.122000000000000000
+2.000000000000000000
+3.140000000000000000
+10.000000000000000000
+20.000000000000000000
+100.000000000000000000
+124.000000000000000000
+125.200000000000000000
+200.000000000000000000
+1234567890.123456780000000000
 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -222,34 +222,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-2
--0.333	0
--0.33	0
--0.3	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	3
-1.12	1
-1.122	1
-2	4
-3.14	13
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-2
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	3
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	4
+3.140000000000000000	13
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -258,23 +258,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789
--1255	-1255.49
--11	-1.122
--1	-2.24
-0	0.33
-1	5.242
-2	4
-3	9.42
-4	3.14
-10	10
-20	20
-100	100
-124	124
-125	125.2
-200	200
-4400	-4400
-1234567890	1234567890.12345678
+-1234567890	-1234567890.123456789000000000
+-1255	-1255.490000000000000000
+-11	-1.122000000000000000
+-1	-2.240000000000000000
+0	0.330000000000000000
+1	5.242000000000000000
+2	4.000000000000000000
+3	9.420000000000000000
+4	3.140000000000000000
+10	10.000000000000000000
+20	20.000000000000000000
+100	100.000000000000000000
+124	124.000000000000000000
+125	125.200000000000000000
+200	200.000000000000000000
+4400	-4400.000000000000000000
+1234567890	1234567890.123456780000000000
 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -283,71 +283,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -356,10 +356,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -368,10 +368,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: DROP TABLE DECIMAL_3_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3_txt

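The two WHERE hunks at the end of this file are a useful sanity check: key=3.14
and key=3.140 still select the same rows, because decimal comparison is numeric
and ignores trailing zeros; only the display changed. In BigDecimal terms,
compareTo (not equals) models this behavior:

    import java.math.BigDecimal;

    BigDecimal a = new BigDecimal("3.14");
    BigDecimal b = new BigDecimal("3.140");
    System.out.println(a.equals(b));    // false: equals is scale-sensitive
    System.out.println(a.compareTo(b)); // 0: numerically equal, like the WHERE above
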
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/vector_decimal_4.q.out
index 613f5a8..c7d3d9e 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_4.q.out
@@ -57,43 +57,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
+-1234567890.1234567890000000000000000	-1234567890
+-4400.0000000000000000000000000	4400
+-1255.4900000000000000000000000	-1255
+-1.1220000000000000000000000	-11
+-1.1200000000000000000000000	-1
+-1.1200000000000000000000000	-1
+-0.3330000000000000000000000	0
+-0.3300000000000000000000000	0
+-0.3000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0100000000000000000000000	0
+0.0200000000000000000000000	0
+0.1000000000000000000000000	0
+0.2000000000000000000000000	0
+0.3000000000000000000000000	0
+0.3300000000000000000000000	0
+0.3330000000000000000000000	0
 0.9999999999999999999999999	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+1.0000000000000000000000000	1
+1.0000000000000000000000000	1
+1.1200000000000000000000000	1
+1.1220000000000000000000000	1
+2.0000000000000000000000000	2
+2.0000000000000000000000000	2
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	4
+10.0000000000000000000000000	10
+20.0000000000000000000000000	20
+100.0000000000000000000000000	100
+124.0000000000000000000000000	124
+125.2000000000000000000000000	125
+200.0000000000000000000000000	200
+1234567890.1234567800000000000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -103,43 +103,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -149,43 +149,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -195,43 +195,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: DROP TABLE DECIMAL_4_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/vector_decimal_5.q.out
index 34c3351..0bfd12e 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_5.q.out
@@ -59,41 +59,41 @@ POSTHOOK: Input: default@decimal_5
 NULL
 NULL
 NULL
--4400
--1255.49
--1.122
--1.12
--1.12
--0.333
--0.33
--0.3
-0
-0
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1
-1
-1.12
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.00000
+1.00000
+1.12000
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -103,32 +103,32 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.12000
+1.12200
+2.00000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -185,40 +185,40 @@ POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
+0.000
+0.000
+100.000
+10.000
+1.000
+0.100
+0.010
+200.000
+20.000
+2.000
+0.000
+0.200
+0.020
+0.300
+0.330
 0.333
--0.3
--0.33
+-0.300
+-0.330
 -0.333
-1
-2
-3.14
--1.12
--1.12
+1.000
+2.000
+3.140
+-1.120
+-1.120
 -1.122
-1.12
+1.120
 1.122
-124
-125.2
+124.000
+125.200
 NULL
-3.14
-3.14
-3.14
-1
+3.140
+3.140
+3.140
+1.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
index 9cdd7fc..e0ccbc6 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
@@ -119,27 +119,27 @@ NULL	0
 NULL	3
 NULL	4
 NULL	1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.00000	4400
+-1255.49000	-1255
+-1.12200	-11
+-1.12000	-1
+-0.33300	0
+-0.30000	0
+0.00000	0
+0.00000	0
+0.33300	0
+1.00000	1
+1.00000	1
+1.12000	1
+1.12200	1
+2.00000	2
+3.14000	3
+3.14000	3
+3.14000	4
+10.00000	10
 10.73433	5
-124	124
-125.2	125
+124.00000	124
+125.20000	125
 23232.23435	2
 PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -151,27 +151,27 @@ POSTHOOK: Input: default@decimal_6_2
 #### A masked pattern was here ####
 NULL	0
 -1234567890.1235	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--0.333	0
--0.3	0
-0	0
-0	0
-0.333	0
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-3.14	3
-3.14	3
-3.14	4
-10	10
+-4400.0000	4400
+-1255.4900	-1255
+-1.1220	-11
+-1.1200	-1
+-0.3330	0
+-0.3000	0
+0.0000	0
+0.0000	0
+0.3330	0
+1.0000	1
+1.0000	1
+1.1200	1
+1.1220	1
+2.0000	2
+3.1400	3
+3.1400	3
+3.1400	4
+10.0000	10
 10.7343	5
-124	124
-125.2	125
+124.0000	124
+125.2000	125
 23232.2344	2
 2389432.2375	3
 2389432.2375	4
@@ -200,54 +200,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.1235
--4400
--4400
--1255.49
--1255.49
--1.122
--1.122
--1.12
--1.12
--0.333
--0.333
--0.3
--0.3
-0
-0
-0
-0
-0.333
-0.333
-1
-1
-1
-1
-1.12
-1.12
-1.122
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-3.14
-3.14
-10
-10
-10.7343
+-1234567890.12350
+-4400.00000
+-4400.00000
+-1255.49000
+-1255.49000
+-1.12200
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33300
+-0.30000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.00000
+0.33300
+0.33300
+1.00000
+1.00000
+1.00000
+1.00000
+1.12000
+1.12000
+1.12200
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+10.00000
+10.73430
 10.73433
-124
-124
-125.2
-125.2
+124.00000
+124.00000
+125.20000
+125.20000
 23232.23435
-23232.2344
-2389432.2375
-2389432.2375
-1234567890.1235
+23232.23440
+2389432.23750
+2389432.23750
+1234567890.12350
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@decimal_6_1


[02/55] [abbrv] hive git commit: HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index e922d7d..a6862be 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -394,6 +394,19 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
   /**
+   * @param array $partitionSpecs
+   * @param string $source_db
+   * @param string $source_table_name
+   * @param string $dest_db
+   * @param string $dest_table_name
+   * @return \metastore\Partition[]
+   * @throws \metastore\MetaException
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\InvalidObjectException
+   * @throws \metastore\InvalidInputException
+   */
+  public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
+  /**
    * @param string $db_name
    * @param string $tbl_name
    * @param string[] $part_vals
@@ -3622,6 +3635,73 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("exchange_partition failed: unknown result");
   }
 
+  public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name)
+  {
+    $this->send_exchange_partitions($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
+    return $this->recv_exchange_partitions();
+  }
+
+  public function send_exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name)
+  {
+    $args = new \metastore\ThriftHiveMetastore_exchange_partitions_args();
+    $args->partitionSpecs = $partitionSpecs;
+    $args->source_db = $source_db;
+    $args->source_table_name = $source_table_name;
+    $args->dest_db = $dest_db;
+    $args->dest_table_name = $dest_table_name;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'exchange_partitions', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('exchange_partitions', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_exchange_partitions()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_exchange_partitions_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_exchange_partitions_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    if ($result->o3 !== null) {
+      throw $result->o3;
+    }
+    if ($result->o4 !== null) {
+      throw $result->o4;
+    }
+    throw new \Exception("exchange_partitions failed: unknown result");
+  }
+
   public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names)
   {
     $this->send_get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names);
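For orientation, a minimal usage sketch of the exchange_partitions client call added above, assuming an already-connected ThriftHiveMetastoreClient; the database/table names and the partition-spec map are hypothetical placeholders:

    // Hypothetical call against the generated client method above: send the
    // five arguments, block for the result, and get back the exchanged
    // partitions as an array of \metastore\Partition objects (or a thrown
    // o1..o4 exception on failure).
    $exchanged = $client->exchange_partitions(
      array('ds' => '2015-11-06'),        // partitionSpecs: key => value strings
      'source_db', 'source_table',
      'dest_db', 'dest_table');
    echo count($exchanged) . " partition(s) exchanged\n";

Unlike the pre-existing exchange_partition (singular) next to which this is generated, the new variant returns the full list of moved partitions rather than a single Partition.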
@@ -19473,6 +19553,410 @@ class ThriftHiveMetastore_exchange_partition_result {
 
 }
 
+class ThriftHiveMetastore_exchange_partitions_args {
+  static $_TSPEC;
+
+  /**
+   * @var array
+   */
+  public $partitionSpecs = null;
+  /**
+   * @var string
+   */
+  public $source_db = null;
+  /**
+   * @var string
+   */
+  public $source_table_name = null;
+  /**
+   * @var string
+   */
+  public $dest_db = null;
+  /**
+   * @var string
+   */
+  public $dest_table_name = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'partitionSpecs',
+          'type' => TType::MAP,
+          'ktype' => TType::STRING,
+          'vtype' => TType::STRING,
+          'key' => array(
+            'type' => TType::STRING,
+          ),
+          'val' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        2 => array(
+          'var' => 'source_db',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'source_table_name',
+          'type' => TType::STRING,
+          ),
+        4 => array(
+          'var' => 'dest_db',
+          'type' => TType::STRING,
+          ),
+        5 => array(
+          'var' => 'dest_table_name',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['partitionSpecs'])) {
+        $this->partitionSpecs = $vals['partitionSpecs'];
+      }
+      if (isset($vals['source_db'])) {
+        $this->source_db = $vals['source_db'];
+      }
+      if (isset($vals['source_table_name'])) {
+        $this->source_table_name = $vals['source_table_name'];
+      }
+      if (isset($vals['dest_db'])) {
+        $this->dest_db = $vals['dest_db'];
+      }
+      if (isset($vals['dest_table_name'])) {
+        $this->dest_table_name = $vals['dest_table_name'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_exchange_partitions_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::MAP) {
+            $this->partitionSpecs = array();
+            $_size669 = 0;
+            $_ktype670 = 0;
+            $_vtype671 = 0;
+            $xfer += $input->readMapBegin($_ktype670, $_vtype671, $_size669);
+            for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
+            {
+              $key674 = '';
+              $val675 = '';
+              $xfer += $input->readString($key674);
+              $xfer += $input->readString($val675);
+              $this->partitionSpecs[$key674] = $val675;
+            }
+            $xfer += $input->readMapEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->source_db);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->source_table_name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dest_db);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 5:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dest_table_name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_exchange_partitions_args');
+    if ($this->partitionSpecs !== null) {
+      if (!is_array($this->partitionSpecs)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('partitionSpecs', TType::MAP, 1);
+      {
+        $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
+        {
+          foreach ($this->partitionSpecs as $kiter676 => $viter677)
+          {
+            $xfer += $output->writeString($kiter676);
+            $xfer += $output->writeString($viter677);
+          }
+        }
+        $output->writeMapEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->source_db !== null) {
+      $xfer += $output->writeFieldBegin('source_db', TType::STRING, 2);
+      $xfer += $output->writeString($this->source_db);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->source_table_name !== null) {
+      $xfer += $output->writeFieldBegin('source_table_name', TType::STRING, 3);
+      $xfer += $output->writeString($this->source_table_name);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->dest_db !== null) {
+      $xfer += $output->writeFieldBegin('dest_db', TType::STRING, 4);
+      $xfer += $output->writeString($this->dest_db);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->dest_table_name !== null) {
+      $xfer += $output->writeFieldBegin('dest_table_name', TType::STRING, 5);
+      $xfer += $output->writeString($this->dest_table_name);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_exchange_partitions_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\Partition[]
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\NoSuchObjectException
+   */
+  public $o2 = null;
+  /**
+   * @var \metastore\InvalidObjectException
+   */
+  public $o3 = null;
+  /**
+   * @var \metastore\InvalidInputException
+   */
+  public $o4 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => '\metastore\Partition',
+            ),
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\NoSuchObjectException',
+          ),
+        3 => array(
+          'var' => 'o3',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\InvalidObjectException',
+          ),
+        4 => array(
+          'var' => 'o4',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\InvalidInputException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+      if (isset($vals['o3'])) {
+        $this->o3 = $vals['o3'];
+      }
+      if (isset($vals['o4'])) {
+        $this->o4 = $vals['o4'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_exchange_partitions_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size678 = 0;
+            $_etype681 = 0;
+            $xfer += $input->readListBegin($_etype681, $_size678);
+            for ($_i682 = 0; $_i682 < $_size678; ++$_i682)
+            {
+              $elem683 = null;
+              $elem683 = new \metastore\Partition();
+              $xfer += $elem683->read($input);
+              $this->success []= $elem683;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\NoSuchObjectException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRUCT) {
+            $this->o3 = new \metastore\InvalidObjectException();
+            $xfer += $this->o3->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::STRUCT) {
+            $this->o4 = new \metastore\InvalidInputException();
+            $xfer += $this->o4->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_exchange_partitions_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->success));
+        {
+          foreach ($this->success as $iter684)
+          {
+            $xfer += $iter684->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o3 !== null) {
+      $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+      $xfer += $this->o3->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o4 !== null) {
+      $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4);
+      $xfer += $this->o4->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class ThriftHiveMetastore_get_partition_with_auth_args {
   static $_TSPEC;
 
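The args struct can also be built directly through the generated constructor, which accepts an associative array keyed by the field names in $_TSPEC; a sketch with placeholder values:

    $args = new \metastore\ThriftHiveMetastore_exchange_partitions_args(array(
      'partitionSpecs'    => array('ds' => '2015-11-06'),  // field 1: string => string map
      'source_db'         => 'source_db',                  // field 2
      'source_table_name' => 'source_table',               // field 3
      'dest_db'           => 'dest_db',                    // field 4
      'dest_table_name'   => 'dest_table',                 // field 5
    ));
    // $args->write($oprot) then emits fields 1..5 in TSPEC order, skipping nulls.

Every remaining hunk in this file is a mechanical side effect of the insertion above: the Thrift compiler numbers its temporary locals ($_sizeNNN, $_ktypeNNN, $_etypeNNN, $elemNNN, $keyNNN/$valNNN, $iterNNN) sequentially through the whole generated file, and the new exchange_partitions structures consume numbers 669 through 684, so every later temporary is renamed sixteen higher (e.g. $_size669 becomes $_size685). No behavior changes below this point.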
@@ -19585,14 +20069,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size669 = 0;
-            $_etype672 = 0;
-            $xfer += $input->readListBegin($_etype672, $_size669);
-            for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
+            $_size685 = 0;
+            $_etype688 = 0;
+            $xfer += $input->readListBegin($_etype688, $_size685);
+            for ($_i689 = 0; $_i689 < $_size685; ++$_i689)
             {
-              $elem674 = null;
-              $xfer += $input->readString($elem674);
-              $this->part_vals []= $elem674;
+              $elem690 = null;
+              $xfer += $input->readString($elem690);
+              $this->part_vals []= $elem690;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19609,14 +20093,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size675 = 0;
-            $_etype678 = 0;
-            $xfer += $input->readListBegin($_etype678, $_size675);
-            for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
+            $_size691 = 0;
+            $_etype694 = 0;
+            $xfer += $input->readListBegin($_etype694, $_size691);
+            for ($_i695 = 0; $_i695 < $_size691; ++$_i695)
             {
-              $elem680 = null;
-              $xfer += $input->readString($elem680);
-              $this->group_names []= $elem680;
+              $elem696 = null;
+              $xfer += $input->readString($elem696);
+              $this->group_names []= $elem696;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -19654,9 +20138,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter681)
+          foreach ($this->part_vals as $iter697)
           {
-            $xfer += $output->writeString($iter681);
+            $xfer += $output->writeString($iter697);
           }
         }
         $output->writeListEnd();
@@ -19676,9 +20160,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter682)
+          foreach ($this->group_names as $iter698)
           {
-            $xfer += $output->writeString($iter682);
+            $xfer += $output->writeString($iter698);
           }
         }
         $output->writeListEnd();
@@ -20269,15 +20753,15 @@ class ThriftHiveMetastore_get_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size683 = 0;
-            $_etype686 = 0;
-            $xfer += $input->readListBegin($_etype686, $_size683);
-            for ($_i687 = 0; $_i687 < $_size683; ++$_i687)
+            $_size699 = 0;
+            $_etype702 = 0;
+            $xfer += $input->readListBegin($_etype702, $_size699);
+            for ($_i703 = 0; $_i703 < $_size699; ++$_i703)
             {
-              $elem688 = null;
-              $elem688 = new \metastore\Partition();
-              $xfer += $elem688->read($input);
-              $this->success []= $elem688;
+              $elem704 = null;
+              $elem704 = new \metastore\Partition();
+              $xfer += $elem704->read($input);
+              $this->success []= $elem704;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20321,9 +20805,9 @@ class ThriftHiveMetastore_get_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter689)
+          foreach ($this->success as $iter705)
           {
-            $xfer += $iter689->write($output);
+            $xfer += $iter705->write($output);
           }
         }
         $output->writeListEnd();
@@ -20469,14 +20953,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size690 = 0;
-            $_etype693 = 0;
-            $xfer += $input->readListBegin($_etype693, $_size690);
-            for ($_i694 = 0; $_i694 < $_size690; ++$_i694)
+            $_size706 = 0;
+            $_etype709 = 0;
+            $xfer += $input->readListBegin($_etype709, $_size706);
+            for ($_i710 = 0; $_i710 < $_size706; ++$_i710)
             {
-              $elem695 = null;
-              $xfer += $input->readString($elem695);
-              $this->group_names []= $elem695;
+              $elem711 = null;
+              $xfer += $input->readString($elem711);
+              $this->group_names []= $elem711;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20524,9 +21008,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter696)
+          foreach ($this->group_names as $iter712)
           {
-            $xfer += $output->writeString($iter696);
+            $xfer += $output->writeString($iter712);
           }
         }
         $output->writeListEnd();
@@ -20615,15 +21099,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size697 = 0;
-            $_etype700 = 0;
-            $xfer += $input->readListBegin($_etype700, $_size697);
-            for ($_i701 = 0; $_i701 < $_size697; ++$_i701)
+            $_size713 = 0;
+            $_etype716 = 0;
+            $xfer += $input->readListBegin($_etype716, $_size713);
+            for ($_i717 = 0; $_i717 < $_size713; ++$_i717)
             {
-              $elem702 = null;
-              $elem702 = new \metastore\Partition();
-              $xfer += $elem702->read($input);
-              $this->success []= $elem702;
+              $elem718 = null;
+              $elem718 = new \metastore\Partition();
+              $xfer += $elem718->read($input);
+              $this->success []= $elem718;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20667,9 +21151,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter703)
+          foreach ($this->success as $iter719)
           {
-            $xfer += $iter703->write($output);
+            $xfer += $iter719->write($output);
           }
         }
         $output->writeListEnd();
@@ -20889,15 +21373,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size704 = 0;
-            $_etype707 = 0;
-            $xfer += $input->readListBegin($_etype707, $_size704);
-            for ($_i708 = 0; $_i708 < $_size704; ++$_i708)
+            $_size720 = 0;
+            $_etype723 = 0;
+            $xfer += $input->readListBegin($_etype723, $_size720);
+            for ($_i724 = 0; $_i724 < $_size720; ++$_i724)
             {
-              $elem709 = null;
-              $elem709 = new \metastore\PartitionSpec();
-              $xfer += $elem709->read($input);
-              $this->success []= $elem709;
+              $elem725 = null;
+              $elem725 = new \metastore\PartitionSpec();
+              $xfer += $elem725->read($input);
+              $this->success []= $elem725;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20941,9 +21425,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter710)
+          foreach ($this->success as $iter726)
           {
-            $xfer += $iter710->write($output);
+            $xfer += $iter726->write($output);
           }
         }
         $output->writeListEnd();
@@ -21150,14 +21634,14 @@ class ThriftHiveMetastore_get_partition_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size711 = 0;
-            $_etype714 = 0;
-            $xfer += $input->readListBegin($_etype714, $_size711);
-            for ($_i715 = 0; $_i715 < $_size711; ++$_i715)
+            $_size727 = 0;
+            $_etype730 = 0;
+            $xfer += $input->readListBegin($_etype730, $_size727);
+            for ($_i731 = 0; $_i731 < $_size727; ++$_i731)
             {
-              $elem716 = null;
-              $xfer += $input->readString($elem716);
-              $this->success []= $elem716;
+              $elem732 = null;
+              $xfer += $input->readString($elem732);
+              $this->success []= $elem732;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21193,9 +21677,9 @@ class ThriftHiveMetastore_get_partition_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter717)
+          foreach ($this->success as $iter733)
           {
-            $xfer += $output->writeString($iter717);
+            $xfer += $output->writeString($iter733);
           }
         }
         $output->writeListEnd();
@@ -21311,14 +21795,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size718 = 0;
-            $_etype721 = 0;
-            $xfer += $input->readListBegin($_etype721, $_size718);
-            for ($_i722 = 0; $_i722 < $_size718; ++$_i722)
+            $_size734 = 0;
+            $_etype737 = 0;
+            $xfer += $input->readListBegin($_etype737, $_size734);
+            for ($_i738 = 0; $_i738 < $_size734; ++$_i738)
             {
-              $elem723 = null;
-              $xfer += $input->readString($elem723);
-              $this->part_vals []= $elem723;
+              $elem739 = null;
+              $xfer += $input->readString($elem739);
+              $this->part_vals []= $elem739;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21363,9 +21847,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter724)
+          foreach ($this->part_vals as $iter740)
           {
-            $xfer += $output->writeString($iter724);
+            $xfer += $output->writeString($iter740);
           }
         }
         $output->writeListEnd();
@@ -21459,15 +21943,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size725 = 0;
-            $_etype728 = 0;
-            $xfer += $input->readListBegin($_etype728, $_size725);
-            for ($_i729 = 0; $_i729 < $_size725; ++$_i729)
+            $_size741 = 0;
+            $_etype744 = 0;
+            $xfer += $input->readListBegin($_etype744, $_size741);
+            for ($_i745 = 0; $_i745 < $_size741; ++$_i745)
             {
-              $elem730 = null;
-              $elem730 = new \metastore\Partition();
-              $xfer += $elem730->read($input);
-              $this->success []= $elem730;
+              $elem746 = null;
+              $elem746 = new \metastore\Partition();
+              $xfer += $elem746->read($input);
+              $this->success []= $elem746;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21511,9 +21995,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter731)
+          foreach ($this->success as $iter747)
           {
-            $xfer += $iter731->write($output);
+            $xfer += $iter747->write($output);
           }
         }
         $output->writeListEnd();
@@ -21660,14 +22144,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size732 = 0;
-            $_etype735 = 0;
-            $xfer += $input->readListBegin($_etype735, $_size732);
-            for ($_i736 = 0; $_i736 < $_size732; ++$_i736)
+            $_size748 = 0;
+            $_etype751 = 0;
+            $xfer += $input->readListBegin($_etype751, $_size748);
+            for ($_i752 = 0; $_i752 < $_size748; ++$_i752)
             {
-              $elem737 = null;
-              $xfer += $input->readString($elem737);
-              $this->part_vals []= $elem737;
+              $elem753 = null;
+              $xfer += $input->readString($elem753);
+              $this->part_vals []= $elem753;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21691,14 +22175,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size738 = 0;
-            $_etype741 = 0;
-            $xfer += $input->readListBegin($_etype741, $_size738);
-            for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
+            $_size754 = 0;
+            $_etype757 = 0;
+            $xfer += $input->readListBegin($_etype757, $_size754);
+            for ($_i758 = 0; $_i758 < $_size754; ++$_i758)
             {
-              $elem743 = null;
-              $xfer += $input->readString($elem743);
-              $this->group_names []= $elem743;
+              $elem759 = null;
+              $xfer += $input->readString($elem759);
+              $this->group_names []= $elem759;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21736,9 +22220,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter744)
+          foreach ($this->part_vals as $iter760)
           {
-            $xfer += $output->writeString($iter744);
+            $xfer += $output->writeString($iter760);
           }
         }
         $output->writeListEnd();
@@ -21763,9 +22247,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter745)
+          foreach ($this->group_names as $iter761)
           {
-            $xfer += $output->writeString($iter745);
+            $xfer += $output->writeString($iter761);
           }
         }
         $output->writeListEnd();
@@ -21854,15 +22338,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size746 = 0;
-            $_etype749 = 0;
-            $xfer += $input->readListBegin($_etype749, $_size746);
-            for ($_i750 = 0; $_i750 < $_size746; ++$_i750)
+            $_size762 = 0;
+            $_etype765 = 0;
+            $xfer += $input->readListBegin($_etype765, $_size762);
+            for ($_i766 = 0; $_i766 < $_size762; ++$_i766)
             {
-              $elem751 = null;
-              $elem751 = new \metastore\Partition();
-              $xfer += $elem751->read($input);
-              $this->success []= $elem751;
+              $elem767 = null;
+              $elem767 = new \metastore\Partition();
+              $xfer += $elem767->read($input);
+              $this->success []= $elem767;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21906,9 +22390,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter752)
+          foreach ($this->success as $iter768)
           {
-            $xfer += $iter752->write($output);
+            $xfer += $iter768->write($output);
           }
         }
         $output->writeListEnd();
@@ -22029,14 +22513,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size753 = 0;
-            $_etype756 = 0;
-            $xfer += $input->readListBegin($_etype756, $_size753);
-            for ($_i757 = 0; $_i757 < $_size753; ++$_i757)
+            $_size769 = 0;
+            $_etype772 = 0;
+            $xfer += $input->readListBegin($_etype772, $_size769);
+            for ($_i773 = 0; $_i773 < $_size769; ++$_i773)
             {
-              $elem758 = null;
-              $xfer += $input->readString($elem758);
-              $this->part_vals []= $elem758;
+              $elem774 = null;
+              $xfer += $input->readString($elem774);
+              $this->part_vals []= $elem774;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22081,9 +22565,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter759)
+          foreach ($this->part_vals as $iter775)
           {
-            $xfer += $output->writeString($iter759);
+            $xfer += $output->writeString($iter775);
           }
         }
         $output->writeListEnd();
@@ -22176,14 +22660,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size760 = 0;
-            $_etype763 = 0;
-            $xfer += $input->readListBegin($_etype763, $_size760);
-            for ($_i764 = 0; $_i764 < $_size760; ++$_i764)
+            $_size776 = 0;
+            $_etype779 = 0;
+            $xfer += $input->readListBegin($_etype779, $_size776);
+            for ($_i780 = 0; $_i780 < $_size776; ++$_i780)
             {
-              $elem765 = null;
-              $xfer += $input->readString($elem765);
-              $this->success []= $elem765;
+              $elem781 = null;
+              $xfer += $input->readString($elem781);
+              $this->success []= $elem781;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22227,9 +22711,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter766)
+          foreach ($this->success as $iter782)
           {
-            $xfer += $output->writeString($iter766);
+            $xfer += $output->writeString($iter782);
           }
         }
         $output->writeListEnd();
@@ -22472,15 +22956,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size767 = 0;
-            $_etype770 = 0;
-            $xfer += $input->readListBegin($_etype770, $_size767);
-            for ($_i771 = 0; $_i771 < $_size767; ++$_i771)
+            $_size783 = 0;
+            $_etype786 = 0;
+            $xfer += $input->readListBegin($_etype786, $_size783);
+            for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
             {
-              $elem772 = null;
-              $elem772 = new \metastore\Partition();
-              $xfer += $elem772->read($input);
-              $this->success []= $elem772;
+              $elem788 = null;
+              $elem788 = new \metastore\Partition();
+              $xfer += $elem788->read($input);
+              $this->success []= $elem788;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22524,9 +23008,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter773)
+          foreach ($this->success as $iter789)
           {
-            $xfer += $iter773->write($output);
+            $xfer += $iter789->write($output);
           }
         }
         $output->writeListEnd();
@@ -22769,15 +23253,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size774 = 0;
-            $_etype777 = 0;
-            $xfer += $input->readListBegin($_etype777, $_size774);
-            for ($_i778 = 0; $_i778 < $_size774; ++$_i778)
+            $_size790 = 0;
+            $_etype793 = 0;
+            $xfer += $input->readListBegin($_etype793, $_size790);
+            for ($_i794 = 0; $_i794 < $_size790; ++$_i794)
             {
-              $elem779 = null;
-              $elem779 = new \metastore\PartitionSpec();
-              $xfer += $elem779->read($input);
-              $this->success []= $elem779;
+              $elem795 = null;
+              $elem795 = new \metastore\PartitionSpec();
+              $xfer += $elem795->read($input);
+              $this->success []= $elem795;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22821,9 +23305,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter780)
+          foreach ($this->success as $iter796)
           {
-            $xfer += $iter780->write($output);
+            $xfer += $iter796->write($output);
           }
         }
         $output->writeListEnd();
@@ -23143,14 +23627,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->names = array();
-            $_size781 = 0;
-            $_etype784 = 0;
-            $xfer += $input->readListBegin($_etype784, $_size781);
-            for ($_i785 = 0; $_i785 < $_size781; ++$_i785)
+            $_size797 = 0;
+            $_etype800 = 0;
+            $xfer += $input->readListBegin($_etype800, $_size797);
+            for ($_i801 = 0; $_i801 < $_size797; ++$_i801)
             {
-              $elem786 = null;
-              $xfer += $input->readString($elem786);
-              $this->names []= $elem786;
+              $elem802 = null;
+              $xfer += $input->readString($elem802);
+              $this->names []= $elem802;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23188,9 +23672,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
       {
         $output->writeListBegin(TType::STRING, count($this->names));
         {
-          foreach ($this->names as $iter787)
+          foreach ($this->names as $iter803)
           {
-            $xfer += $output->writeString($iter787);
+            $xfer += $output->writeString($iter803);
           }
         }
         $output->writeListEnd();
@@ -23279,15 +23763,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size788 = 0;
-            $_etype791 = 0;
-            $xfer += $input->readListBegin($_etype791, $_size788);
-            for ($_i792 = 0; $_i792 < $_size788; ++$_i792)
+            $_size804 = 0;
+            $_etype807 = 0;
+            $xfer += $input->readListBegin($_etype807, $_size804);
+            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
             {
-              $elem793 = null;
-              $elem793 = new \metastore\Partition();
-              $xfer += $elem793->read($input);
-              $this->success []= $elem793;
+              $elem809 = null;
+              $elem809 = new \metastore\Partition();
+              $xfer += $elem809->read($input);
+              $this->success []= $elem809;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23331,9 +23815,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter794)
+          foreach ($this->success as $iter810)
           {
-            $xfer += $iter794->write($output);
+            $xfer += $iter810->write($output);
           }
         }
         $output->writeListEnd();
@@ -23672,15 +24156,15 @@ class ThriftHiveMetastore_alter_partitions_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size795 = 0;
-            $_etype798 = 0;
-            $xfer += $input->readListBegin($_etype798, $_size795);
-            for ($_i799 = 0; $_i799 < $_size795; ++$_i799)
+            $_size811 = 0;
+            $_etype814 = 0;
+            $xfer += $input->readListBegin($_etype814, $_size811);
+            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
             {
-              $elem800 = null;
-              $elem800 = new \metastore\Partition();
-              $xfer += $elem800->read($input);
-              $this->new_parts []= $elem800;
+              $elem816 = null;
+              $elem816 = new \metastore\Partition();
+              $xfer += $elem816->read($input);
+              $this->new_parts []= $elem816;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23718,9 +24202,9 @@ class ThriftHiveMetastore_alter_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter801)
+          foreach ($this->new_parts as $iter817)
           {
-            $xfer += $iter801->write($output);
+            $xfer += $iter817->write($output);
           }
         }
         $output->writeListEnd();
@@ -24190,14 +24674,14 @@ class ThriftHiveMetastore_rename_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size802 = 0;
-            $_etype805 = 0;
-            $xfer += $input->readListBegin($_etype805, $_size802);
-            for ($_i806 = 0; $_i806 < $_size802; ++$_i806)
+            $_size818 = 0;
+            $_etype821 = 0;
+            $xfer += $input->readListBegin($_etype821, $_size818);
+            for ($_i822 = 0; $_i822 < $_size818; ++$_i822)
             {
-              $elem807 = null;
-              $xfer += $input->readString($elem807);
-              $this->part_vals []= $elem807;
+              $elem823 = null;
+              $xfer += $input->readString($elem823);
+              $this->part_vals []= $elem823;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24243,9 +24727,9 @@ class ThriftHiveMetastore_rename_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter808)
+          foreach ($this->part_vals as $iter824)
           {
-            $xfer += $output->writeString($iter808);
+            $xfer += $output->writeString($iter824);
           }
         }
         $output->writeListEnd();
@@ -24430,14 +24914,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size809 = 0;
-            $_etype812 = 0;
-            $xfer += $input->readListBegin($_etype812, $_size809);
-            for ($_i813 = 0; $_i813 < $_size809; ++$_i813)
+            $_size825 = 0;
+            $_etype828 = 0;
+            $xfer += $input->readListBegin($_etype828, $_size825);
+            for ($_i829 = 0; $_i829 < $_size825; ++$_i829)
             {
-              $elem814 = null;
-              $xfer += $input->readString($elem814);
-              $this->part_vals []= $elem814;
+              $elem830 = null;
+              $xfer += $input->readString($elem830);
+              $this->part_vals []= $elem830;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24472,9 +24956,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter815)
+          foreach ($this->part_vals as $iter831)
           {
-            $xfer += $output->writeString($iter815);
+            $xfer += $output->writeString($iter831);
           }
         }
         $output->writeListEnd();
@@ -24928,14 +25412,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size816 = 0;
-            $_etype819 = 0;
-            $xfer += $input->readListBegin($_etype819, $_size816);
-            for ($_i820 = 0; $_i820 < $_size816; ++$_i820)
+            $_size832 = 0;
+            $_etype835 = 0;
+            $xfer += $input->readListBegin($_etype835, $_size832);
+            for ($_i836 = 0; $_i836 < $_size832; ++$_i836)
             {
-              $elem821 = null;
-              $xfer += $input->readString($elem821);
-              $this->success []= $elem821;
+              $elem837 = null;
+              $xfer += $input->readString($elem837);
+              $this->success []= $elem837;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -24971,9 +25455,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter822)
+          foreach ($this->success as $iter838)
           {
-            $xfer += $output->writeString($iter822);
+            $xfer += $output->writeString($iter838);
           }
         }
         $output->writeListEnd();
@@ -25133,17 +25617,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size823 = 0;
-            $_ktype824 = 0;
-            $_vtype825 = 0;
-            $xfer += $input->readMapBegin($_ktype824, $_vtype825, $_size823);
-            for ($_i827 = 0; $_i827 < $_size823; ++$_i827)
+            $_size839 = 0;
+            $_ktype840 = 0;
+            $_vtype841 = 0;
+            $xfer += $input->readMapBegin($_ktype840, $_vtype841, $_size839);
+            for ($_i843 = 0; $_i843 < $_size839; ++$_i843)
             {
-              $key828 = '';
-              $val829 = '';
-              $xfer += $input->readString($key828);
-              $xfer += $input->readString($val829);
-              $this->success[$key828] = $val829;
+              $key844 = '';
+              $val845 = '';
+              $xfer += $input->readString($key844);
+              $xfer += $input->readString($val845);
+              $this->success[$key844] = $val845;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -25179,10 +25663,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
         {
-          foreach ($this->success as $kiter830 => $viter831)
+          foreach ($this->success as $kiter846 => $viter847)
           {
-            $xfer += $output->writeString($kiter830);
-            $xfer += $output->writeString($viter831);
+            $xfer += $output->writeString($kiter846);
+            $xfer += $output->writeString($viter847);
           }
         }
         $output->writeMapEnd();
@@ -25302,17 +25786,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size832 = 0;
-            $_ktype833 = 0;
-            $_vtype834 = 0;
-            $xfer += $input->readMapBegin($_ktype833, $_vtype834, $_size832);
-            for ($_i836 = 0; $_i836 < $_size832; ++$_i836)
+            $_size848 = 0;
+            $_ktype849 = 0;
+            $_vtype850 = 0;
+            $xfer += $input->readMapBegin($_ktype849, $_vtype850, $_size848);
+            for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
             {
-              $key837 = '';
-              $val838 = '';
-              $xfer += $input->readString($key837);
-              $xfer += $input->readString($val838);
-              $this->part_vals[$key837] = $val838;
+              $key853 = '';
+              $val854 = '';
+              $xfer += $input->readString($key853);
+              $xfer += $input->readString($val854);
+              $this->part_vals[$key853] = $val854;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -25357,10 +25841,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter839 => $viter840)
+          foreach ($this->part_vals as $kiter855 => $viter856)
           {
-            $xfer += $output->writeString($kiter839);
-            $xfer += $output->writeString($viter840);
+            $xfer += $output->writeString($kiter855);
+            $xfer += $output->writeString($viter856);
           }
         }
         $output->writeMapEnd();
@@ -25682,17 +26166,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
         case 3:
           if ($ftype == TType::MAP) {
             $this->part_vals = array();
-            $_size841 = 0;
-            $_ktype842 = 0;
-            $_vtype843 = 0;
-            $xfer += $input->readMapBegin($_ktype842, $_vtype843, $_size841);
-            for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
+            $_size857 = 0;
+            $_ktype858 = 0;
+            $_vtype859 = 0;
+            $xfer += $input->readMapBegin($_ktype858, $_vtype859, $_size857);
+            for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
             {
-              $key846 = '';
-              $val847 = '';
-              $xfer += $input->readString($key846);
-              $xfer += $input->readString($val847);
-              $this->part_vals[$key846] = $val847;
+              $key862 = '';
+              $val863 = '';
+              $xfer += $input->readString($key862);
+              $xfer += $input->readString($val863);
+              $this->part_vals[$key862] = $val863;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -25737,10 +26221,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $kiter848 => $viter849)
+          foreach ($this->part_vals as $kiter864 => $viter865)
           {
-            $xfer += $output->writeString($kiter848);
-            $xfer += $output->writeString($viter849);
+            $xfer += $output->writeString($kiter864);
+            $xfer += $output->writeString($viter865);
           }
         }
         $output->writeMapEnd();
@@ -27214,15 +27698,15 @@ class ThriftHiveMetastore_get_indexes_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size850 = 0;
-            $_etype853 = 0;
-            $xfer += $input->readListBegin($_etype853, $_size850);
-            for ($_i854 = 0; $_i854 < $_size850; ++$_i854)
+            $_size866 = 0;
+            $_etype869 = 0;
+            $xfer += $input->readListBegin($_etype869, $_size866);
+            for ($_i870 = 0; $_i870 < $_size866; ++$_i870)
             {
-              $elem855 = null;
-              $elem855 = new \metastore\Index();
-              $xfer += $elem855->read($input);
-              $this->success []= $elem855;
+              $elem871 = null;
+              $elem871 = new \metastore\Index();
+              $xfer += $elem871->read($input);
+              $this->success []= $elem871;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27266,9 +27750,9 @@ class ThriftHiveMetastore_get_indexes_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter856)
+          foreach ($this->success as $iter872)
           {
-            $xfer += $iter856->write($output);
+            $xfer += $iter872->write($output);
           }
         }
         $output->writeListEnd();
@@ -27475,14 +27959,14 @@ class ThriftHiveMetastore_get_index_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size857 = 0;
-            $_etype860 = 0;
-            $xfer += $input->readListBegin($_etype860, $_size857);
-            for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
+            $_size873 = 0;
+            $_etype876 = 0;
+            $xfer += $input->readListBegin($_etype876, $_size873);
+            for ($_i877 = 0; $_i877 < $_size873; ++$_i877)
             {
-              $elem862 = null;
-              $xfer += $input->readString($elem862);
-              $this->success []= $elem862;
+              $elem878 = null;
+              $xfer += $input->readString($elem878);
+              $this->success []= $elem878;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27518,9 +28002,9 @@ class ThriftHiveMetastore_get_index_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter863)
+          foreach ($this->success as $iter879)
           {
-            $xfer += $output->writeString($iter863);
+            $xfer += $output->writeString($iter879);
           }
         }
         $output->writeListEnd();
@@ -30994,14 +31478,14 @@ class ThriftHiveMetastore_get_functions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size864 = 0;
-            $_etype867 = 0;
-            $xfer += $input->readListBegin($_etype867, $_size864);
-            for ($_i868 = 0; $_i868 < $_size864; ++$_i868)
+            $_size880 = 0;
+            $_etype883 = 0;
+            $xfer += $input->readListBegin($_etype883, $_size880);
+            for ($_i884 = 0; $_i884 < $_size880; ++$_i884)
             {
-              $elem869 = null;
-              $xfer += $input->readString($elem869);
-              $this->success []= $elem869;
+              $elem885 = null;
+              $xfer += $input->readString($elem885);
+              $this->success []= $elem885;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31037,9 +31521,9 @@ class ThriftHiveMetastore_get_functions_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter870)
+          foreach ($this->success as $iter886)
           {
-            $xfer += $output->writeString($iter870);
+            $xfer += $output->writeString($iter886);
           }
         }
         $output->writeListEnd();
@@ -31908,14 +32392,14 @@ class ThriftHiveMetastore_get_role_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size871 = 0;
-            $_etype874 = 0;
-            $xfer += $input->readListBegin($_etype874, $_size871);
-            for ($_i875 = 0; $_i875 < $_size871; ++$_i875)
+            $_size887 = 0;
+            $_etype890 = 0;
+            $xfer += $input->readListBegin($_etype890, $_size887);
+            for ($_i891 = 0; $_i891 < $_size887; ++$_i891)
             {
-              $elem876 = null;
-              $xfer += $input->readString($elem876);
-              $this->success []= $elem876;
+              $elem892 = null;
+              $xfer += $input->readString($elem892);
+              $this->success []= $elem892;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31951,9 +32435,9 @@ class ThriftHiveMetastore_get_role_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter877)
+          foreach ($this->success as $iter893)
           {
-            $xfer += $output->writeString($iter877);
+            $xfer += $output->writeString($iter893);
           }
         }
         $output->writeListEnd();
@@ -32644,15 +33128,15 @@ class ThriftHiveMetastore_list_roles_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size878 = 0;
-            $_etype881 = 0;
-            $xfer += $input->readListBegin($_etype881, $_size878);
-            for ($_i882 = 0; $_i882 < $_size878; ++$_i882)
+            $_size894 = 0;
+            $_etype897 = 0;
+            $xfer += $input->readListBegin($_etype897, $_size894);
+            for ($_i898 = 0; $_i898 < $_size894; ++$_i898)
             {
-              $elem883 = null;
-              $elem883 = new \metastore\Role();
-              $xfer += $elem883->read($input);
-              $this->success []= $elem883;
+              $elem899 = null;
+              $elem899 = new \metastore\Role();
+              $xfer += $elem899->read($input);
+              $this->success []= $elem899;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32688,9 +33172,9 @@ class ThriftHiveMetastore_list_roles_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter884)
+          foreach ($this->success as $iter900)
           {
-            $xfer += $iter884->write($output);
+            $xfer += $iter900->write($output);
           }
         }
         $output->writeListEnd();
@@ -33352,14 +33836,14 @@ class ThriftHiveMetastore_get_privilege_set_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size885 = 0;
-            $_etype888 = 0;
-            $xfer += $input->readListBegin($_etype888, $_size885);
-            for ($_i889 = 0; $_i889 < $_size885; ++$_i889)
+            $_size901 = 0;
+            $_etype904 = 0;
+            $xfer += $input->readListBegin($_etype904, $_size901);
+            for ($_i905 = 0; $_i905 < $_size901; ++$_i905)
             {
-              $elem890 = null;
-              $xfer += $input->readString($elem890);
-              $this->group_names []= $elem890;
+              $elem906 = null;
+              $xfer += $input->readString($elem906);
+              $this->group_names []= $elem906;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33400,9 +33884,9 @@ class ThriftHiveMetastore_get_privilege_set_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter891)
+          foreach ($this->group_names as $iter907)
           {
-            $xfer += $output->writeString($iter891);
+            $xfer += $output->writeString($iter907);
           }
         }
         $output->writeListEnd();
@@ -33710,15 +34194,15 @@ class ThriftHiveMetastore_list_privileges_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size892 = 0;
-            $_etype895 = 0;
-            $xfer += $input->readListBegin($_etype895, $_size892);
-            for ($_i896 = 0; $_i896 < $_size892; ++$_i896)
+            $_size908 = 0;
+            $_etype911 = 0;
+            $xfer += $input->readListBegin($_etype911, $_size908);
+            for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
             {
-              $elem897 = null;
-              $elem897 = new \metastore\HiveObjectPrivilege();
-              $xfer += $elem897->read($input);
-              $this->success []= $elem897;
+              $elem913 = null;
+              $elem913 = new \metastore\HiveObjectPrivilege();
+              $xfer += $elem913->read($input);
+              $this->success []= $elem913;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33754,9 +34238,9 @@ class ThriftHiveMetastore_list_privileges_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter898)
+          foreach ($this->success as $iter914)
           {
-            $xfer += $iter898->write($output);
+            $xfer += $iter914->write($output);
           }
         }
         $output->writeListEnd();
@@ -34388,14 +34872,14 @@ class ThriftHiveMetastore_set_ugi_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size899 = 0;
-            $_etype902 = 0;
-            $xfer += $input->readListBegin($_etype902, $_size899);
-            for ($_i903 = 0; $_i903 < $_size899; ++$_i903)
+            $_size915 = 0;
+            $_etype918 = 0;
+            $xfer += $input->readListBegin($_etype918, $_size915);
+            for ($_i919 = 0; $_i919 < $_size915; ++$_i919)
             {
-              $elem904 = null;
-              $xfer += $input->readString($elem904);
-              $this->group_names []= $elem904;
+              $elem920 = null;
+              $xfer += $input->readString($elem920);
+              $this->group_names []= $elem920;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34428,9 +34912,9 @@ class ThriftHiveMetastore_set_ugi_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter905)
+          foreach ($this->group_names as $iter921)
           {
-            $xfer += $output->writeString($iter905);
+            $xfer += $output->writeString($iter921);
           }
         }
         $output->writeListEnd();
@@ -34506,14 +34990,14 @@ class ThriftHiveMetastore_set_ugi_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size906 = 0;
-            $_etype909 = 0;
-            $xfer += $input->readListBegin($_etype909, $_size906);
-            for ($_i910 = 0; $_i910 < $_size906; ++$_i910)
+            $_size922 = 0;
+            $_etype925 = 0;
+            $xfer += $input->readListBegin($_etype925, $_size922);
+            for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
             {
-              $elem911 = null;
-              $xfer += $input->readString($elem911);
-              $this->success []= $elem911;
+              $elem927 = null;
+              $xfer += $input->readString($elem927);
+              $this->success []= $elem927;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34549,9 +35033,9 @@ class ThriftHiveMetastore_set_ugi_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter912)
+          foreach ($this->success as $iter928)
           {
-            $xfer += $output->writeString($iter912);
+            $xfer += $output->writeString($iter928);
           }
         }
         $output->writeListEnd();
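
The PHP hunks above only renumber the compiler-generated temporaries ($elem862 becomes $elem878, $iter863 becomes $iter879, and so on): the counters shift because the new exchange_partitions argument and result classes are emitted earlier in the same generated file, while the list read/write pattern itself is unchanged. For reference, a minimal hand-written sketch of that same read loop, in Python rather than the generated PHP, assuming a Thrift protocol object iprot positioned at a list-of-strings field:

  def read_string_list(iprot):
      # Mirrors the generated readListBegin/readString/readListEnd loop:
      # the protocol reports the element type and count, then each element
      # is read in order and appended to the result.
      etype, size = iprot.readListBegin()
      values = []
      for _ in range(size):
          values.append(iprot.readString())
      iprot.readListEnd()
      return values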

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 8dba17b..65ba10e 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -68,6 +68,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)')
   print('  Partition get_partition(string db_name, string tbl_name,  part_vals)')
   print('  Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
+  print('   exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
   print('  Partition get_partition_with_auth(string db_name, string tbl_name,  part_vals, string user_name,  group_names)')
   print('  Partition get_partition_by_name(string db_name, string tbl_name, string part_name)')
   print('   get_partitions(string db_name, string tbl_name, i16 max_parts)')
@@ -486,6 +487,12 @@ elif cmd == 'exchange_partition':
     sys.exit(1)
   pp.pprint(client.exchange_partition(eval(args[0]),args[1],args[2],args[3],args[4],))
 
+elif cmd == 'exchange_partitions':
+  if len(args) != 5:
+    print('exchange_partitions requires 5 args')
+    sys.exit(1)
+  pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],))
+
 elif cmd == 'get_partition_with_auth':
   if len(args) != 5:
     print('get_partition_with_auth requires 5 args')
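
With the generated shims in place, exchange_partitions is reachable both through the ThriftHiveMetastore-remote helper above (five arguments, the first eval'd into a partition-spec dict) and through the plain generated Python client. A minimal sketch of the latter, assuming a metastore Thrift endpoint on localhost:9083 and purely illustrative database, table, and partition names:

  from thrift.transport import TSocket, TTransport
  from thrift.protocol import TBinaryProtocol
  from hive_metastore import ThriftHiveMetastore

  # Assumed endpoint; point this at your own metastore.
  transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
  client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
  transport.open()
  # partitionSpecs maps partition columns to values; all names are hypothetical.
  moved = client.exchange_partitions({'ds': '2015-11-06'},
                                     'source_db', 'source_table',
                                     'dest_db', 'dest_table')
  transport.close()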


[37/55] [abbrv] hive git commit: HIVE-12317: Emit current database in lineage info (Jimmy, reviewed by Yongzhi)

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/11f5d449/ql/src/test/results/clientpositive/lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out
index 0185d43..aed41b0 100644
--- a/ql/src/test/results/clientpositive/lineage2.q.out
+++ b/ql/src/test/results/clientpositive/lineage2.q.out
@@ -5,12 +5,12 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@src2
-{"version":"1.0","engine":"mr","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: select * from src1 where key is not null and value is not null limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"(src1.key is not null and src1.value is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"(src1.key is not null and src1.value is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 238	val_238
 	
 311	val_311
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 150	val_150
 213	val_213
@@ -31,17 +31,17 @@ PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 PREHOOK: query: insert into table dest1 select * from src2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select key k, dest1.value from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	val_238
 	
 311	val_311
@@ -97,7 +97,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -119,7 +119,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}
 
 128
 146
@@ -140,7 +140,7 @@ PREHOOK: query: select key, count(1) a from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(1)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(1)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
 	20
 128	2
 146	2
@@ -161,7 +161,7 @@ PREHOOK: query: select key k, count(*) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(*)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(*)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
 	20
 128	2
 146	2
@@ -182,7 +182,7 @@ PREHOOK: query: select key k, count(value) from dest1 group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(default.dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(default.dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 	20
 128	2
 146	2
@@ -203,7 +203,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
 	3
 val_146	3
 val_150	3
@@ -227,7 +227,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value order b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
 	3
 val_146	3
 val_150	3
@@ -237,7 +237,7 @@ PREHOOK: query: select key, length(value) from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 238	7
 	0
 311	7
@@ -292,7 +292,7 @@ PREHOOK: query: select length(value) + 3 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
 10
 3
 10
@@ -347,7 +347,7 @@ PREHOOK: query: select 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
 5
 5
 5
@@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
 15
 15
 15
@@ -461,31 +461,31 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.val
 ue2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertex
 Id":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.s
 rc2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN",
 "vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.v
 alue2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vert
 exId":"default.src2.value2"}]}
 PREHOOK: query: insert into table dest2
   select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"i
 d":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"de
 fault.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: select * from src1 where length(key) > 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(length(src1.key) > 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(length(src1.key) > 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 238	val_238
 311	val_311
 255	val_255
@@ -503,7 +503,7 @@ PREHOOK: query: select * from src1 where length(key) > 2 and value > 'a'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((length(src1.key) > 2) and (src1.value > 'a'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((length(src1.key) > 2) and (src1.value > 'a'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 238	val_238
 311	val_311
 255	val_255
@@ -523,14 +523,14 @@ PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest3
-{"version":"1.0","engine":"mr","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"
 },{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId"
 :"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: insert overwrite table dest2
   select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 PREHOOK: Input: default@src2
 PREHOOK: Output: default@dest2
-{"version":"1.0","engine":"mr","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1
 .value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","ve
 rtexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
 PREHOOK: query: drop table if exists dest_l1
 PREHOOK: type: DROPTABLE
 PREHOOK: query: CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE
@@ -552,7 +552,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
 PREHOOK: Output: default@dest_l1
-{"version":"1.0","engine":"mr","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(j.key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"j.value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(p1.key = t1.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n      FROM src1 t1\n      LEFT OUTER JOIN src p1\n      ON (t1.key = p1.key)\n      UNION ALL\n      SELECT t2.key, p2.value\n      FROM src1 t2\n      LEFT OUTER JOIN src p2\n      ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(j.key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"j.value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(p1.key = t1.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
 PREHOOK: query: drop table if exists emp
 PREHOOK: type: DROPTABLE
 PREHOOK: query: drop table if exists dept
@@ -593,7 +593,7 @@ PREHOOK: Input: default@dept
 PREHOOK: Input: default@emp
 PREHOOK: Input: default@project
 PREHOOK: Output: default@tgt
-{"version":"1.0","engine":"mr","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PREDICATE"},{"sources":[1
 1,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n  SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n  FROM (\n    SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n    FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n    ) em\n  JOIN dept d ON d.dept_id = em.dept_id\n  ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"edgeType":"PROJECTION"},{"sources":[8],"targets":[2,3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PRED
 ICATE"},{"sources":[11,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
 PREHOOK: query: drop table if exists dest_l2
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile
@@ -604,7 +604,7 @@ PREHOOK: query: insert into dest_l2 values(0, 1, 100, 10000)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@dest_l2
-{"version":"1.0","engine":"mr","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(values__tmp__table__1.tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(values__tmp__table__1.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(values__tmp__table__1.tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(values__tmp__table__1.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 PREHOOK: query: select * from (
   select c1 + c2 x from dest_l2
   union all
@@ -612,7 +612,7 @@ PREHOOK: query: select * from (
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"v2.x","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n  select c1 + c2 x from dest_l2\n  union all\n  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"v2.x","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
 101
 10000
 PREHOOK: query: drop table if exists dest_l3
@@ -625,7 +625,7 @@ PREHOOK: query: insert into dest_l3 values(0, "s1", "s2", 15)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__2
 PREHOOK: Output: default@dest_l3
-{"version":"1.0","engine":"mr","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: select sum(a.c1) over (partition by a.c1 order by a.id)
 from dest_l2 a
 where a.c2 != 10
@@ -634,7 +634,7 @@ having count(a.c2) > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) id)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.de
 st_l2.id"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) id)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","
 vertexId":"default.dest_l2.id"}]}
 1
 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3
 from dest_l2 a join dest_l3 b on (a.id = b.id)
@@ -646,7 +646,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUM
 N","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[10,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[10],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":
 1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"}]}
 1	1	s2	15
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
@@ -659,7 +659,7 @@ PREHOOK: Input: default@dest_l2
 PREHOOK: Input: default@dest_l3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t
-{"version":"1.0","engine":"mr","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
 PREHOOK: query: SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
 concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
@@ -667,7 +667,7 @@ GROUP BY substr(src1.key,1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT substr(src1.value, 5))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT substr(src1.value, 5))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 	7	1543.0
 1	3	1296.0
 2	6	21257.0
@@ -696,7 +696,7 @@ PREHOOK: query: select identity, ep1_id from relations
 PREHOOK: type: QUERY
 PREHOOK: Input: default@relations
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"bb30b94d13d0b35802db85b4e33230b3","queryText":"select identity, ep1_id from relations\n  lateral view explode(ep1_ids) nav_rel as ep1_id","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"nav_rel._col11","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"identity"},{"id":1,"vertexType":"COLUMN","vertexId":"ep1_id"},{"id":2,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":3,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"bb30b94d13d0b35802db85b4e33230b3","queryText":"select identity, ep1_id from relations\n  lateral view explode(ep1_ids) nav_rel as ep1_id","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"nav_rel._col11","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"identity"},{"id":1,"vertexType":"COLUMN","vertexId":"ep1_id"},{"id":2,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":3,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"}]}
 PREHOOK: query: insert into rels_exploded select identity, type,
   ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id
 from relations lateral view explode(ep1_ids) rel1 as ep1_id
@@ -704,4 +704,4 @@ from relations lateral view explode(ep1_ids) rel1 as ep1_id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@relations
 PREHOOK: Output: default@rels_exploded
-{"version":"1.0","engine":"mr","hash":"e76d2efade744d1d5cf74fda064ba6c6","queryText":"insert into rels_exploded select identity, type,\n  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id\nfrom relations lateral view explode(ep1_ids) rel1 as ep1_id\n  lateral view explode (ep2_ids) rel2 as ep2_id","edges":[{"sources":[8],"targets":[0],"edgeType":"PROJECTION"},{"sources":[9],"targets":[1],"edgeType":"PROJECTION"},{"sources":[10],"targets":[2],"edgeType":"PROJECTION"},{"sources":[11],"targets":[3],"edgeType":"PROJECTION"},{"sources":[12],"targets":[4],"edgeType":"PROJECTION"},{"sources":[13],"targets":[5],"edgeType":"PROJECTION"},{"sources":[14],"targets":[6],"expression":"CAST( rel1._col11 AS CHAR(32)","edgeType":"PROJECTION"},{"sources":[15],"targets":[7],"expression":"CAST( rel2._col12 AS CHAR(32)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.rels_exploded.identity"},{"id":1,"vertexType":"COLUMN","vertexId":"default.rels_explo
 ded.type"},{"id":2,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_src_type"},{"id":3,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_type"},{"id":4,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_src_type"},{"id":5,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_type"},{"id":6,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_id"},{"id":7,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_id"},{"id":8,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":9,"vertexType":"COLUMN","vertexId":"default.relations.type"},{"id":10,"vertexType":"COLUMN","vertexId":"default.relations.ep1_src_type"},{"id":11,"vertexType":"COLUMN","vertexId":"default.relations.ep1_type"},{"id":12,"vertexType":"COLUMN","vertexId":"default.relations.ep2_src_type"},{"id":13,"vertexType":"COLUMN","vertexId":"default.relations.ep2_type"},{"id":14,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"},{"id":15,"vertexType":"COLU
 MN","vertexId":"default.relations.ep2_ids"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"e76d2efade744d1d5cf74fda064ba6c6","queryText":"insert into rels_exploded select identity, type,\n  ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id\nfrom relations lateral view explode(ep1_ids) rel1 as ep1_id\n  lateral view explode (ep2_ids) rel2 as ep2_id","edges":[{"sources":[8],"targets":[0],"edgeType":"PROJECTION"},{"sources":[9],"targets":[1],"edgeType":"PROJECTION"},{"sources":[10],"targets":[2],"edgeType":"PROJECTION"},{"sources":[11],"targets":[3],"edgeType":"PROJECTION"},{"sources":[12],"targets":[4],"edgeType":"PROJECTION"},{"sources":[13],"targets":[5],"edgeType":"PROJECTION"},{"sources":[14],"targets":[6],"expression":"CAST( rel1._col11 AS CHAR(32)","edgeType":"PROJECTION"},{"sources":[15],"targets":[7],"expression":"CAST( rel2._col12 AS CHAR(32)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.rels_exploded.identity"},{"id":1,"vertexType":"COLUMN","vertexId
 ":"default.rels_exploded.type"},{"id":2,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_src_type"},{"id":3,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_type"},{"id":4,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_src_type"},{"id":5,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_type"},{"id":6,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep1_id"},{"id":7,"vertexType":"COLUMN","vertexId":"default.rels_exploded.ep2_id"},{"id":8,"vertexType":"COLUMN","vertexId":"default.relations.identity"},{"id":9,"vertexType":"COLUMN","vertexId":"default.relations.type"},{"id":10,"vertexType":"COLUMN","vertexId":"default.relations.ep1_src_type"},{"id":11,"vertexType":"COLUMN","vertexId":"default.relations.ep1_type"},{"id":12,"vertexType":"COLUMN","vertexId":"default.relations.ep2_src_type"},{"id":13,"vertexType":"COLUMN","vertexId":"default.relations.ep2_type"},{"id":14,"vertexType":"COLUMN","vertexId":"default.relations.ep1_ids"},{"id":
 15,"vertexType":"COLUMN","vertexId":"default.relations.ep2_ids"}]}


[08/55] [abbrv] hive git commit: HIVE-11634 : Support partition pruning for IN(STRUCT(partcol, nonpartcol..)...) (Hari Subramaniyan, reviewed by Laljo John Pullokkaran)

Posted by jx...@apache.org.
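The q.out below exercises the new pruning: an IN over struct(partcol, nonpartcol) constants lets the planner drop any partition whose partition-column component matches none of the constants, which is why the plans' "Truncated Path -> Alias" sections list only ds=2000-04-08 and ds=2000-04-09 even though ds=2000-04-10 exists. A rough sketch of that compile-time decision follows, with hypothetical names (prune_partitions and the literal tuples) standing in for Hive's internal partition pruner, not its actual API:

# Keep a partition only if some constant struct in the IN list agrees with
# it on the partition-column component; non-partition components (key,
# key+2, key+key) are unknown at compile time and so cannot prune further.
def prune_partitions(partitions, part_col_idx, in_list):
    return [p for p in partitions
            if any(const[part_col_idx] == p for const in in_list)]

# struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2)):
# ds is component 0, so ds='2000-04-10' can be dropped before execution.
print(prune_partitions(
    ["2000-04-08", "2000-04-09", "2000-04-10"],
    0,
    [("2000-04-08", 1), ("2000-04-09", 2)],
))  # ['2000-04-08', '2000-04-09']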
http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/pcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcs.q.out b/ql/src/test/results/clientpositive/pcs.q.out
new file mode 100644
index 0000000..5cf0dff
--- /dev/null
+++ b/ql/src/test/results/clientpositive/pcs.q.out
@@ -0,0 +1,2249 @@
+PREHOOK: query: drop table pcs_t1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcs_t1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table pcs_t2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcs_t2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table pcs_t1 (key int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@pcs_t1
+POSTHOOK: query: create table pcs_t1 (key int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@pcs_t1
+PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcs_t1@ds=2000-04-10
+POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table pcs_t1 partition(ds) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+PREHOOK: Output: default@pcs_t1
+PREHOOK: Output: default@pcs_t1@ds=2000-04-08
+PREHOOK: Output: default@pcs_t1@ds=2000-04-09
+PREHOOK: Output: default@pcs_t1@ds=2000-04-10
+POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+POSTHOOK: Output: default@pcs_t1
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
+PREHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+PREHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               key
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               value
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         or
+            and
+               =
+                  TOK_TABLE_OR_COL
+                     ds
+                  '2000-04-08'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  1
+            and
+               =
+                  TOK_TABLE_OR_COL
+                     ds
+                  '2000-04-09'
+               =
+                  TOK_TABLE_OR_COL
+                     key
+                  2
+      TOK_ORDERBY
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               key
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               value
+         TOK_TABSORTCOLNAMEASC
+            TOK_TABLE_OR_COL
+               ds
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcs_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string), ds (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
+                  sort order: +++
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                  tag: -1
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Truncated Path -> Alias:
+        /pcs_t1/ds=2000-04-08 [pcs_t1]
+        /pcs_t1/ds=2000-04-09 [pcs_t1]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+            Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2
+                  columns.types int:string:string
+                  escape.delim \
+                  hive.serialization.extend.additional.nesting.levels true
+                  serialization.escape.crlf true
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+2	val_2	2000-04-09
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               TOK_TABLE_OR_COL
+                  ds
+               TOK_TABLE_OR_COL
+                  key
+            TOK_FUNCTION
+               struct
+               '2000-04-08'
+               1
+            TOK_FUNCTION
+               struct
+               '2000-04-09'
+               2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcs_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: ds (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Truncated Path -> Alias:
+        /pcs_t1/ds=2000-04-08 [pcs_t1]
+        /pcs_t1/ds=2000-04-09 [pcs_t1]
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+2000-04-09
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               TOK_TABLE_OR_COL
+                  ds
+               +
+                  TOK_TABLE_OR_COL
+                     key
+                  2
+            TOK_FUNCTION
+               struct
+               '2000-04-08'
+               3
+            TOK_FUNCTION
+               struct
+               '2000-04-09'
+               4
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcs_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(ds,(key + 2))) IN (const struct('2000-04-08',3), const struct('2000-04-09',4)) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: ds (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Truncated Path -> Alias:
+        /pcs_t1/ds=2000-04-08 [pcs_t1]
+        /pcs_t1/ds=2000-04-09 [pcs_t1]
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+2000-04-09
+PREHOOK: query: explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               pcs_t1
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               pcs_t1
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               ds
+            .
+               TOK_TABLE_OR_COL
+                  b
+               ds
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_HINTLIST
+            TOK_HINT
+               TOK_MAPJOIN
+               TOK_HINTARGLIST
+                  pcs_t1
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               ds
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               .
+                  TOK_TABLE_OR_COL
+                     a
+                  ds
+               .
+                  TOK_TABLE_OR_COL
+                     a
+                  key
+               .
+                  TOK_TABLE_OR_COL
+                     b
+                  ds
+            TOK_FUNCTION
+               struct
+               '2000-04-08'
+               1
+               '2000-04-09'
+            TOK_FUNCTION
+               struct
+               '2000-04-09'
+               2
+               '2000-04-08'
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Reduce Output Operator
+              key expressions: ds (type: string)
+              sort order: +
+              Map-reduce partition columns: ds (type: string)
+              Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+              tag: 0
+              value expressions: key (type: int)
+              auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Reduce Output Operator
+              key expressions: ds (type: string)
+              sort order: +
+              Map-reduce partition columns: ds (type: string)
+              Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+              tag: 1
+              value expressions: key (type: int)
+              auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Truncated Path -> Alias:
+        /pcs_t1/ds=2000-04-08 [a, b]
+        /pcs_t1/ds=2000-04-09 [a, b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 ds (type: string)
+            1 ds (type: string)
+          outputColumnNames: _col0, _col2, _col6, _col8
+          Statistics: Num rows: 44 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(_col2,_col0,_col8)) IN (const struct('2000-04-08',1,'2000-04-09'), const struct('2000-04-09',2,'2000-04-08')) (type: boolean)
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col2 (type: string), _col6 (type: int)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      columns _col0,_col1
+                      columns.types string:int
+                      escape.delim \
+                      hive.serialization.extend.additional.nesting.levels true
+                      serialization.escape.crlf true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               TOK_TABLE_OR_COL
+                  ds
+               +
+                  TOK_TABLE_OR_COL
+                     key
+                  TOK_TABLE_OR_COL
+                     key
+            TOK_FUNCTION
+               struct
+               '2000-04-08'
+               1
+            TOK_FUNCTION
+               struct
+               '2000-04-09'
+               2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcs_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(ds,(key + key))) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: ds (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Truncated Path -> Alias:
+        /pcs_t1/ds=2000-04-08 [pcs_t1]
+        /pcs_t1/ds=2000-04-09 [pcs_t1]
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+PREHOOK: query: explain select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcs_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: key (type: int)
+                sort order: +
+                Map-reduce partition columns: key (type: int)
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: int
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col0
+                  partition by: _col0
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: lag_window_0
+                        arguments: _col0
+                        name: lag
+                        window function: GenericUDAFLagEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
+            Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: lag_window_0 (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: EXPLAIN EXTENDED
+SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_SUBQUERY
+         TOK_UNIONALL
+            TOK_QUERY
+               TOK_FROM
+                  TOK_TABREF
+                     TOK_TABNAME
+                        pcs_t1
+                     X
+               TOK_INSERT
+                  TOK_DESTINATION
+                     TOK_DIR
+                        TOK_TMP_FILE
+                  TOK_SELECT
+                     TOK_SELEXPR
+                        TOK_ALLCOLREF
+                           TOK_TABNAME
+                              X
+                  TOK_WHERE
+                     TOK_FUNCTION
+                        in
+                        TOK_FUNCTION
+                           struct
+                           .
+                              TOK_TABLE_OR_COL
+                                 X
+                              ds
+                           .
+                              TOK_TABLE_OR_COL
+                                 X
+                              key
+                        TOK_FUNCTION
+                           struct
+                           '2000-04-08'
+                           1
+                        TOK_FUNCTION
+                           struct
+                           '2000-04-09'
+                           2
+            TOK_QUERY
+               TOK_FROM
+                  TOK_TABREF
+                     TOK_TABNAME
+                        pcs_t1
+                     Y
+               TOK_INSERT
+                  TOK_DESTINATION
+                     TOK_DIR
+                        TOK_TMP_FILE
+                  TOK_SELECT
+                     TOK_SELEXPR
+                        TOK_ALLCOLREF
+                           TOK_TABNAME
+                              Y
+                  TOK_WHERE
+                     TOK_FUNCTION
+                        in
+                        TOK_FUNCTION
+                           struct
+                           .
+                              TOK_TABLE_OR_COL
+                                 Y
+                              ds
+                           .
+                              TOK_TABLE_OR_COL
+                                 Y
+                              key
+                        TOK_FUNCTION
+                           struct
+                           '2000-04-08'
+                           1
+                        TOK_FUNCTION
+                           struct
+                           '2000-04-09'
+                           2
+         A
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_ALLCOLREF
+      TOK_WHERE
+         =
+            .
+               TOK_TABLE_OR_COL
+                  A
+               ds
+            '2008-04-08'
+      TOK_SORTBY
+         TOK_TABSORTCOLNAMEASC
+            .
+               TOK_TABLE_OR_COL
+                  A
+               key
+         TOK_TABSORTCOLNAMEASC
+            .
+               TOK_TABLE_OR_COL
+                  A
+               value
+         TOK_TABSORTCOLNAMEASC
+            .
+               TOK_TABLE_OR_COL
+                  A
+               ds
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: x
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) and (ds = '2008-04-08')) (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Union
+                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string)
+                      sort order: +++
+                      Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      tag: -1
+                      auto parallelism: false
+          TableScan
+            alias: y
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) and (ds = '2008-04-08')) (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Union
+                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string)
+                      sort order: +++
+                      Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      tag: -1
+                      auto parallelism: false
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), '2008-04-08' (type: string)
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2
+                  columns.types int:string:string
+                  escape.delim \
+                  hive.serialization.extend.additional.nesting.levels true
+                  serialization.escape.crlf true
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+#### A masked pattern was here ####
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               TOK_FUNCTION
+                  when
+                  =
+                     TOK_TABLE_OR_COL
+                        ds
+                     '2000-04-08'
+                  10
+                  20
+            TOK_FUNCTION
+               struct
+               10
+            TOK_FUNCTION
+               struct
+               11
+
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (const struct(10)) IN (const struct(10), const struct(11)) (type: boolean)
+            Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+#### A masked pattern was here ####
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               TOK_TABLE_OR_COL
+                  ds
+               TOK_TABLE_OR_COL
+                  key
+               TOK_FUNCTION
+                  rand
+                  100
+            TOK_FUNCTION
+               struct
+               '2000-04-08'
+               1
+               0.2
+            TOK_FUNCTION
+               struct
+               '2000-04-09'
+               2
+               0.3
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: pcs_t1
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: (struct(ds,key,rand(100))) IN (const struct('2000-04-08',1,0.2), const struct('2000-04-09',2,0.3)) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: ds (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+#### A masked pattern was here ####
+          Partition
+            base file name: ds=2000-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Truncated Path -> Alias:
+        /pcs_t1/ds=2000-04-08 [pcs_t1]
+        /pcs_t1/ds=2000-04-09 [pcs_t1]
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         TOK_FUNCTION
+            in
+            TOK_FUNCTION
+               struct
+               or
+                  =
+                     TOK_TABLE_OR_COL
+                        ds
+                     '2000-04-08'
+                  =
+                     TOK_TABLE_OR_COL
+                        key
+                     2
+               TOK_TABLE_OR_COL
+                  key
+            TOK_FUNCTION
+               struct
+               true
+               2
+            TOK_FUNCTION
+               struct
+               false
+               3
+
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-10
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(((ds = '2000-04-08') or (key = 2)),key)) IN (const struct(true,2), const struct(false,3)) (type: boolean)
+            Statistics: Num rows: 30 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 30 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+2000-04-08
+2000-04-09
+2000-04-10
+PREHOOK: query: explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            pcs_t1
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_TABLE_OR_COL
+               ds
+      TOK_WHERE
+         or
+            =
+               TOK_TABLE_OR_COL
+                  key
+               3
+            and
+               TOK_FUNCTION
+                  in
+                  TOK_FUNCTION
+                     struct
+                     or
+                        =
+                           TOK_TABLE_OR_COL
+                              ds
+                           '2000-04-08'
+                        =
+                           TOK_TABLE_OR_COL
+                              key
+                           2
+                     TOK_TABLE_OR_COL
+                        key
+                  TOK_FUNCTION
+                     struct
+                     true
+                     2
+                  TOK_FUNCTION
+                     struct
+                     false
+                     3
+               >
+                  +
+                     TOK_TABLE_OR_COL
+                        key
+                     5
+                  0
+
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-10
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = 3) or ((struct(((ds = '2000-04-08') or (key = 2)),key)) IN (const struct(true,2), const struct(false,3)) and ((key + 5) > 0))) (type: boolean)
+            Statistics: Num rows: 40 Data size: 320 Basic stats: COMP

<TRUNCATED>
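
The golden output above exercises partition condition pruning for struct-IN predicates: when the struct mixes the partition column ds with row-level expressions, only the ds component can justify skipping a partition, and only when membership is decidable from the partition value alone; that is why the PREHOOK/POSTHOOK inputs list exactly the ds values named in the constant structs. A minimal sketch of that per-partition test, with hypothetical names (Hive's actual pruner works on expression trees, not strings):

    import java.util.Arrays;
    import java.util.List;

    public class StructInPruneSketch {
      // For a predicate like struct(ds, key+key) IN (struct('2000-04-08',1),
      // struct('2000-04-09',2)), a partition can only be skipped when its ds
      // value matches none of the constant structs' ds slots; the
      // non-partition slot (key+key) is left to the row-level Filter Operator.
      static boolean partitionMayMatch(String partitionDs, List<String> constantDsSlots) {
        return constantDsSlots.contains(partitionDs);
      }

      public static void main(String[] args) {
        List<String> dsSlots = Arrays.asList("2000-04-08", "2000-04-09");
        System.out.println(partitionMayMatch("2000-04-08", dsSlots)); // true: scanned
        System.out.println(partitionMayMatch("2000-04-10", dsSlots)); // false: pruned
      }
    }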

[52/55] [abbrv] hive git commit: HIVE-12288: Bloom-1 filters for Vectorized map-joins (Gopal V, reviewed by Matt McCline)

Posted by jx...@apache.org.
HIVE-12288: Bloom-1 filters for Vectorized map-joins (Gopal V, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3bf280ff
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3bf280ff
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3bf280ff

Branch: refs/heads/master-fixed
Commit: 3bf280ff0bb848b069298dd55dd03bd6e3dac97d
Parents: 973268b
Author: Gopal V <go...@apache.org>
Authored: Thu Nov 5 22:18:11 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Thu Nov 5 22:18:11 2015 -0800

----------------------------------------------------------------------
 .../ql/exec/persistence/HybridHashTableContainer.java    | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3bf280ff/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 632ba4f..a0c9b98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -985,6 +985,17 @@ public class HybridHashTableContainer
       int keyHash = HashCodeUtil.murmurHash(bytes, offset, length);
       partitionId = keyHash & (hashPartitions.length - 1);
 
+      if (!bloom1.testLong(keyHash)) {
+        /*
+         * if the keyHash is missing in the bloom filter, then the value cannot exist in any of the
+         * spilled partitions - return NOMATCH
+         */
+        dummyRow = null;
+        aliasFilter = (byte) 0xff;
+        hashMapResult.forget();
+        return JoinResult.NOMATCH;
+      }
+
       // If the target hash table is on disk, spill this row to disk as well to be processed later
       if (isOnDisk(partitionId)) {
         return JoinUtil.JoinResult.SPILL;
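
The new check consults a Bloom filter over the key hashes of every row loaded into the container before taking the expensive spilled-partition path: a miss proves the key was never inserted, so the join can answer NOMATCH immediately. A minimal sketch of a single-hash ("Bloom-1") filter with the same no-false-negative property; the class and its API here are illustrative assumptions, not Hive's internal implementation:

    import java.util.BitSet;

    /** Illustrative single-hash Bloom filter over long key hashes. */
    final class Bloom1Filter {
      private final BitSet bits;
      private final int mask;

      Bloom1Filter(int sizePow2) {       // sizePow2 must be a power of two
        bits = new BitSet(sizePow2);
        mask = sizePow2 - 1;
      }

      void addLong(long keyHash) {       // called while loading the hash table
        bits.set((int) (keyHash & mask));
      }

      boolean testLong(long keyHash) {   // false => definitely never added
        return bits.get((int) (keyHash & mask));
      }
    }

Because a Bloom filter never reports an inserted hash as absent, the early NOMATCH is always safe; a hit still falls through to the regular isOnDisk/hash-map probe.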


[20/55] [abbrv] hive git commit: HIVE-12238: Vectorization: Thread-safety errors in VectorUDFDate (Gopal V, reviewed by Gunther Hagleitner)

Posted by jx...@apache.org.
HIVE-12238: Vectorization: Thread-safety errors in VectorUDFDate (Gopal V, reviewed by Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d7c04859
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d7c04859
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d7c04859

Branch: refs/heads/master-fixed
Commit: d7c04859e1903cd6ed38678e8dae6b453c34b7bb
Parents: ad12765
Author: Gopal V <go...@apache.org>
Authored: Mon Nov 2 19:56:08 2015 -0800
Committer: Gopal V <go...@apache.org>
Committed: Mon Nov 2 19:56:08 2015 -0800

----------------------------------------------------------------------
 .../vector/expressions/VectorUDFDateString.java |  4 +-
 .../expressions/TestVectorDateExpressions.java  | 71 +++++++++++++++++++-
 2 files changed, 72 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d7c04859/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
index f1a5b93..e27ac6a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
@@ -22,6 +22,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.vector.VectorGroupByOperator;
 import org.apache.hadoop.io.Text;
+import org.apache.hive.common.util.DateUtils;
 
 import java.text.SimpleDateFormat;
 import java.util.Date;
@@ -30,14 +31,13 @@ import java.text.ParseException;
 public class VectorUDFDateString extends StringUnaryUDF {
   private static final long serialVersionUID = 1L;
 
-  private transient static SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
-
   private static final Logger LOG = LoggerFactory.getLogger(
       VectorUDFDateString.class.getName());
 
   public VectorUDFDateString(int colNum, int outputColumn) {
     super(colNum, outputColumn, new StringUnaryUDF.IUDFUnaryString() {
       Text t = new Text();
+      final transient SimpleDateFormat formatter = DateUtils.getDateFormat();
 
       @Override
       public Text evaluate(Text s) {
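
The deleted static field is the heart of this bug: SimpleDateFormat keeps mutable state while parsing and formatting, so one instance shared by all operator threads intermittently corrupts dates or throws. The patch gives each IUDFUnaryString instance its own formatter via DateUtils.getDateFormat(); a common way to implement such a helper is a ThreadLocal, sketched below under the assumption that one formatter per thread is acceptable:

    import java.text.SimpleDateFormat;

    final class PerThreadDateFormat {
      // SimpleDateFormat is not thread-safe; hand each thread its own copy.
      private static final ThreadLocal<SimpleDateFormat> FORMAT =
          new ThreadLocal<SimpleDateFormat>() {
            @Override
            protected SimpleDateFormat initialValue() {
              return new SimpleDateFormat("yyyy-MM-dd");
            }
          };

      static SimpleDateFormat get() {
        return FORMAT.get();
      }

      private PerThreadDateFormat() {
      }
    }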

http://git-wip-us.apache.org/repos/asf/hive/blob/d7c04859/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
index 6bd4be1..9c4a751 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
@@ -31,15 +32,28 @@ import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+import org.junit.internal.runners.statements.Fail;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
 
 public class TestVectorDateExpressions {
+
+  private ExecutorService runner;
+
   /* copied over from VectorUDFTimestampFieldLong */
   private TimestampWritable toTimestampWritable(long daysSinceEpoch) {
     Timestamp ts = new Timestamp(DateWritable.daysToMillis((int) daysSinceEpoch));
@@ -412,6 +426,60 @@ public class TestVectorDateExpressions {
     verifyUDFWeekOfYear(batch);
   }
 
+  @Before
+  public void setUp() throws Exception {
+    runner =
+        Executors.newFixedThreadPool(3,
+            new ThreadFactoryBuilder().setNameFormat("date-tester-thread-%d").build());
+  }
+
+  private static final class MultiThreadedDateFormatTest implements Callable<Void> {
+    @Override
+    public Void call() throws Exception {
+      int batchSize = 1024;
+      VectorUDFDateString udf = new VectorUDFDateString(0, 1);
+      VectorizedRowBatch batch = new VectorizedRowBatch(2, batchSize);
+      BytesColumnVector in = new BytesColumnVector(batchSize);
+      BytesColumnVector out = new BytesColumnVector(batchSize);
+      batch.cols[0] = in;
+      batch.cols[1] = out;
+      for (int i = 0; i < batchSize; i++) {
+        byte[] data = String.format("1999-%02d-%02d", 1 + (i % 12), 1 + (i % 15)).getBytes("UTF-8");
+        in.setRef(i, data, 0, data.length);
+        in.isNull[i] = false;
+      }
+      udf.evaluate(batch);
+      // any exception thrown here indicates a thread-safety bug in the formatter
+      return (Void) null;
+    }
+  }
+
+  // 5s timeout
+  @Test(timeout = 5000)
+  public void testMultiThreadedVectorUDFDate() {
+    List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
+    for (int i = 0; i < 200; i++) {
+      tasks.add(new MultiThreadedDateFormatTest());
+    }
+    try {
+      List<Future<Void>> results = runner.invokeAll(tasks);
+      for (Future<Void> f : results) {
+        Assert.assertNull(f.get());
+      }
+    } catch (InterruptedException ioe) {
+      Assert.fail("Interrupted while running tests");
+    } catch (Exception e) {
+      Assert.fail("Multi threaded operations threw unexpected Exception: " + e.getMessage());
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (runner != null) {
+      runner.shutdownNow();
+    }
+  }
+
   public static void main(String[] args) {
     TestVectorDateExpressions self = new TestVectorDateExpressions();
     self.testVectorUDFYear();
@@ -419,5 +487,6 @@ public class TestVectorDateExpressions {
     self.testVectorUDFDayOfMonth();
     self.testVectorUDFWeekOfYear();
     self.testVectorUDFUnixTimeStamp();
+    self.testMultiThreadedVectorUDFDate();
   }
 }
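
The test above exists because java.text.SimpleDateFormat keeps mutable parse
state, so a single instance shared across threads can corrupt results or throw
seemingly at random. The fix in VectorUDFDateString gives each UDF instance its
own formatter obtained from DateUtils.getDateFormat(). A minimal stand-alone
sketch of the same defensive pattern (illustrative only, not Hive code; the
class name and date pattern here are assumptions):

    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.Date;

    // Sketch: one SimpleDateFormat per thread, since instances are not
    // safe for concurrent use.
    public final class ThreadSafeDateParser {
      private static final ThreadLocal<SimpleDateFormat> FMT =
          ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd"));

      public static Date parse(String s) throws ParseException {
        return FMT.get().parse(s);  // each thread parses with its own instance
      }

      private ThreadSafeDateParser() {
      }
    }

An immutable java.time.DateTimeFormatter avoids the issue entirely, but the
per-thread or per-instance SimpleDateFormat pattern matches the style of the
code base at this point.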


[26/55] [abbrv] hive git commit: HIVE-12297: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with '$' in typeInfo (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12297: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with '$' in typeInfo (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/62376158
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/62376158
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/62376158

Branch: refs/heads/master-fixed
Commit: 6237615835d6b35c13d49ccb2bc1cd345fc3cf13
Parents: 50177ef
Author: Pengcheng Xiong <px...@apache.org>
Authored: Tue Nov 3 11:24:01 2015 -0800
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Tue Nov 3 11:24:01 2015 -0800

----------------------------------------------------------------------
 .../test/queries/clientpositive/cbo_udf_max.q   | 36 ++++++++++++
 .../results/clientpositive/cbo_udf_max.q.out    | 62 ++++++++++++++++++++
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |  5 +-
 3 files changed, 102 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/62376158/ql/src/test/queries/clientpositive/cbo_udf_max.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_udf_max.q b/ql/src/test/queries/clientpositive/cbo_udf_max.q
new file mode 100644
index 0000000..c22e89b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_udf_max.q
@@ -0,0 +1,36 @@
+set hive.cbo.returnpath.hiveop=true;
+
+DESCRIBE FUNCTION max;
+DESCRIBE FUNCTION EXTENDED max;
+
+
+set hive.map.aggr = false;
+set hive.groupby.skewindata = false;
+
+SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src;
+
+
+set hive.map.aggr = true;
+set hive.groupby.skewindata = false;
+
+SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src;
+
+
+set hive.map.aggr = false;
+set hive.groupby.skewindata = true;
+
+SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src;
+
+
+set hive.map.aggr = true;
+set hive.groupby.skewindata = true;
+
+SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src;

http://git-wip-us.apache.org/repos/asf/hive/blob/62376158/ql/src/test/results/clientpositive/cbo_udf_max.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_udf_max.q.out b/ql/src/test/results/clientpositive/cbo_udf_max.q.out
new file mode 100644
index 0000000..410cf31
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_udf_max.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: DESCRIBE FUNCTION max
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION max
+POSTHOOK: type: DESCFUNCTION
+max(expr) - Returns the maximum value of expr
+PREHOOK: query: DESCRIBE FUNCTION EXTENDED max
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION EXTENDED max
+POSTHOOK: type: DESCFUNCTION
+max(expr) - Returns the maximum value of expr
+PREHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"expr$0":498,"expr$1":"val_498"}	{"expr$0":498,"expr$1":"val_498"}
+PREHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"expr$0":498,"expr$1":"val_498"}	{"expr$0":"98","expr$1":"val_98"}
+PREHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"expr$0":498,"expr$1":"val_498"}	{"expr$0":498,"expr$1":"val_498"}
+PREHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT max(struct(CAST(key as INT), value)),
+       max(struct(key, value))
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"expr$0":498,"expr$1":"val_498"}	{"expr$0":"98","expr$1":"val_98"}

http://git-wip-us.apache.org/repos/asf/hive/blob/62376158/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index a4323d1..24361c7 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -256,7 +256,7 @@ public final class TypeInfoUtils {
     };
 
     private static boolean isTypeChar(char c) {
-      return Character.isLetterOrDigit(c) || c == '_' || c == '.' || c == ' ';
+      return Character.isLetterOrDigit(c) || c == '_' || c == '.' || c == ' ' || c == '$';
     }
 
     /**
@@ -266,6 +266,9 @@ public final class TypeInfoUtils {
      *
      * tokenize("map<int,string>") should return
      * ["map","<","int",",","string",">"]
+     * 
+     * Note that we also accept '$' for the new Calcite return path. Since '$'
+     * does not appear in any Hive type name, it is safe to do so.
      */
     private static ArrayList<Token> tokenize(String typeInfoString) {
       ArrayList<Token> tokens = new ArrayList<Token>(0);
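
For a concrete picture of what accepting '$' buys, here is a stripped-down
tokenizer that follows the same isTypeChar rule (a sketch only; the real logic
lives in TypeInfoUtils, and the class name here is illustrative):

    import java.util.ArrayList;
    import java.util.List;

    // Simplified sketch of the tokenize() behavior: runs of "type chars"
    // form one token; every other character is a token by itself.
    public final class TypeTokenizerSketch {
      private static boolean isTypeChar(char c) {
        return Character.isLetterOrDigit(c) || c == '_' || c == '.' || c == ' ' || c == '$';
      }

      public static List<String> tokenize(String s) {
        List<String> tokens = new ArrayList<>();
        int begin = 0;
        for (int end = 1; end <= s.length(); end++) {
          // split at the end of input, or wherever either neighbor is a special char
          if (end == s.length() || !isTypeChar(s.charAt(end - 1)) || !isTypeChar(s.charAt(end))) {
            tokens.add(s.substring(begin, end));
            begin = end;
          }
        }
        return tokens;
      }

      public static void main(String[] args) {
        // With '$' accepted, "expr$0" stays a single token:
        // [struct, <, expr$0, :, int, ,, expr$1, :, string, >]
        System.out.println(tokenize("struct<expr$0:int,expr$1:string>"));
      }
    }

Without '$' in isTypeChar, "expr$0" would split into "expr", "$", "0", and the
type strings produced on the Calcite return path would fail to parse.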


[09/55] [abbrv] hive git commit: HIVE-11634 : Support partition pruning for IN(STRUCT(partcol, nonpartcol..)...) (Hari Subramaniyan, reviewed by Laljo John Pullokkaran)

Posted by jx...@apache.org.
HIVE-11634 : Support partition pruning for IN(STRUCT(partcol, nonpartcol..)...) (Hari Subramaniyan, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c9246f44
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c9246f44
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c9246f44

Branch: refs/heads/master-fixed
Commit: c9246f44ead401b9121c3badbfbdb07cc9227a0a
Parents: 55a24f0
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Mon Nov 2 11:34:49 2015 -0800
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Mon Nov 2 11:34:49 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    4 +-
 .../apache/hadoop/hive/conf/HiveConf.java.orig  |    5 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java     |   10 +-
 .../ql/optimizer/PartitionColumnsSeparator.java |  525 ++++
 .../hive/ql/optimizer/PointLookupOptimizer.java |   90 +-
 .../ql/optimizer/pcr/PcrExprProcFactory.java    |   33 +
 .../hive/ql/optimizer/ppr/OpProcFactory.java    |    3 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |    9 -
 ql/src/test/queries/clientpositive/pcs.q        |   66 +
 .../test/queries/clientpositive/pointlookup.q   |    6 +-
 .../test/queries/clientpositive/pointlookup2.q  |    2 +-
 .../test/queries/clientpositive/pointlookup3.q  |    2 +-
 .../dynpart_sort_optimization_acid.q.out        |    4 +-
 .../llap/dynamic_partition_pruning.q.out        |   45 -
 .../vectorized_dynamic_partition_pruning.q.out  |   45 -
 ql/src/test/results/clientpositive/pcs.q.out    | 2249 ++++++++++++++++++
 .../results/clientpositive/pointlookup.q.out    |    8 +-
 .../tez/dynamic_partition_pruning.q.out         |   45 -
 .../vectorized_dynamic_partition_pruning.q.out  |   45 -
 19 files changed, 2896 insertions(+), 300 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b214344..5198bb5 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1263,8 +1263,8 @@ public class HiveConf extends Configuration {
          "Whether to transform OR clauses in Filter operators into IN clauses"),
     HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 31,
              "Minimum number of OR clauses needed to transform into IN clauses"),
-    HIVEPOINTLOOKUPOPTIMIZEREXTRACT("hive.optimize.point.lookup.extract", true,
-                 "Extract partial expressions when optimizing point lookup IN clauses"),
+    HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true,
+        "Extract partition columns from IN clauses"),
     // Constant propagation optimizer
     HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
     HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
index f05f224..b214344 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
@@ -2206,7 +2206,10 @@ public class HiveConf extends Configuration {
         "Exceeding this will trigger a flush irrelevant of memory pressure condition."),
     HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
         "Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
-
+    HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true,
+        "This flag should be set to true to enable the new vectorization\n" +
+        "of queries using ReduceSink.\ni" +
+        "The default value is true."),
     HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control "
         + "whether to check, convert, and normalize partition value to conform to its column type in "
         + "partition operations including but not limited to insert, such as alter, describe etc."),

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 7ee5081..6347872 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -84,11 +84,11 @@ public class Optimizer {
     if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER)) {
       final int min = HiveConf.getIntVar(hiveConf,
           HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN);
-      final boolean extract = HiveConf.getBoolVar(hiveConf,
-          HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZEREXTRACT);
-      final boolean testMode = HiveConf.getBoolVar(hiveConf,
-          HiveConf.ConfVars.HIVE_IN_TEST);
-      transformations.add(new PointLookupOptimizer(min, extract, testMode));
+      transformations.add(new PointLookupOptimizer(min));
+    }
+
+    if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPARTITIONCOLUMNSEPARATOR)) {
+        transformations.add(new PartitionColumnsSeparator());
     }
 
     if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
new file mode 100644
index 0000000..f71f37c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
@@ -0,0 +1,525 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.Stack;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.FilterOperator;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
+import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.ForwardWalker;
+import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
+import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.lib.TypeRule;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+/**
+ * This optimization will take a Filter expression, and if its predicate contains
+ * an IN operator whose children are constant structs or structs containing constant fields,
+ * it will try to generate an additional predicate with IN clauses containing only
+ * partition columns. This predicate is in turn used by the partition pruner to prune
+ * away partitions that cannot satisfy the original IN(STRUCT(..)..) predicate.
+ */
+public class PartitionColumnsSeparator implements Transform {
+
+  private static final Log LOG = LogFactory.getLog(PartitionColumnsSeparator.class);
+  private static final String IN_UDF =
+    GenericUDFIn.class.getAnnotation(Description.class).name();
+  private static final String STRUCT_UDF =
+    GenericUDFStruct.class.getAnnotation(Description.class).name();
+  private static final String AND_UDF =
+    GenericUDFOPAnd.class.getAnnotation(Description.class).name();
+
+  @Override
+  public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 1. Trigger transformation
+    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+    opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), new StructInTransformer());
+
+    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
+    GraphWalker ogw = new ForwardWalker(disp);
+
+    List<Node> topNodes = new ArrayList<Node>();
+    topNodes.addAll(pctx.getTopOps().values());
+    ogw.startWalking(topNodes, null);
+    return pctx;
+  }
+
+  private class StructInTransformer implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+      FilterOperator filterOp = (FilterOperator) nd;
+      ExprNodeDesc predicate = filterOp.getConf().getPredicate();
+
+      // Generate the partition pruning predicate as separate IN clauses
+      // containing the partitioning and non-partitioning columns.
+      ExprNodeDesc newPredicate = generateInClauses(predicate);
+      if (newPredicate != null) {
+        // Replace filter in current FIL with new FIL
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Generated new predicate with IN clause: " + newPredicate);
+        }
+        final List<ExprNodeDesc> subExpr =
+                new ArrayList<ExprNodeDesc>(2);
+        subExpr.add(predicate);
+        subExpr.add(newPredicate);
+        ExprNodeGenericFuncDesc newFilterPredicate = new ExprNodeGenericFuncDesc(
+                TypeInfoFactory.booleanTypeInfo,
+                FunctionRegistry.getFunctionInfo(AND_UDF).getGenericUDF(), subExpr);
+        filterOp.getConf().setPredicate(newFilterPredicate);
+      }
+
+      return null;
+    }
+
+    private ExprNodeDesc generateInClauses(ExprNodeDesc predicate) throws SemanticException {
+      Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
+      exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), new StructInExprProcessor());
+
+      // The dispatcher fires the processor corresponding to the closest matching
+      // rule and passes the context along
+      Dispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
+      GraphWalker egw = new PreOrderOnceWalker(disp);
+
+      List<Node> startNodes = new ArrayList<Node>();
+      startNodes.add(predicate);
+
+      HashMap<Node, Object> outputMap = new HashMap<Node, Object>();
+      egw.startWalking(startNodes, outputMap);
+      return (ExprNodeDesc) outputMap.get(predicate);
+    }
+  }
+
+  /**
+   * The StructInExprProcessor processes the IN clauses of the following format :
+   * STRUCT(T1.a, T1.b, T2.b, T2.c) IN (STRUCT(1, 2, 3, 4) , STRUCT(2, 3, 4, 5))
+   * where T1.a, T1.b, T2.c are all partition columns and T2.b is a non-partition
+   * column. The resulting additional predicate generated after
+   * StructInExprProcessor.process() looks like :
+   *    STRUCT(T1.a, T1.b) IN (STRUCT(1, 2), STRUCT(2, 3))
+   *    AND
+   *    STRUCT(T2.b) IN (STRUCT(4), STRUCT(5))
+   * The additional predicate generated is used to prune the partitions that are
+   * part of the given query. Once the partitions are pruned, the partition condition
+   * remover is expected to remove the redundant predicates from the plan.
+   */
+  private class StructInExprProcessor implements NodeProcessor {
+
+    /** TableInfo is populated in PASS 1 of process(). It contains the information required
+     * to generate an IN clause of the following format:
+     * STRUCT(T1.a, T1.b) IN (const STRUCT(1, 2), const STRUCT(2, 3))
+     * Note that in the above example all elements of the struct come from the same table.
+     * The populated TableInfo is used to generate the IN clause in PASS 2 of process().
+     * The class has the following fields:
+     * 1. Expression node descriptors for the left hand side of the IN clause for the table
+     * 2. A 2-D list of expression node descriptors corresponding to the elements of the IN clause
+     */
+    class TableInfo {
+      List<ExprNodeDesc> exprNodeLHSDescriptor;
+      List<List<ExprNodeDesc>> exprNodeRHSStructs;
+
+      public TableInfo() {
+       exprNodeLHSDescriptor = new ArrayList<ExprNodeDesc>();
+       exprNodeRHSStructs = new ArrayList<List<ExprNodeDesc>>();
+      }
+    }
+
+    // Mapping from an expression node to whether it contains only
+    // partition/virtual columns or constants
+    private Map<ExprNodeDesc, Boolean> exprNodeToPartOrVirtualColOrConstExpr =
+      new IdentityHashMap<ExprNodeDesc, Boolean>();
+
+    /**
+     * This function iterates through the entire subtree under a given expression node
+     * and makes sure that the expression contains only constant nodes or
+     * partition/virtual columns as leaf nodes.
+     * @param en Expression Node Descriptor for the root node.
+     * @return true if the subtree rooted under en has only partition/virtual columns or
+     * constant values as the leaf nodes. Else, return false.
+     */
+    private boolean exprContainsOnlyPartitionColOrVirtualColOrConstants(ExprNodeDesc en) {
+      if (en == null) {
+        return true;
+      }
+      if (exprNodeToPartOrVirtualColOrConstExpr.containsKey(en)) {
+        return exprNodeToPartOrVirtualColOrConstExpr.get(en);
+      }
+      if (en instanceof ExprNodeColumnDesc) {
+        boolean ret = ((ExprNodeColumnDesc)en).getIsPartitionColOrVirtualCol();
+        exprNodeToPartOrVirtualColOrConstExpr.put(en, ret);
+        return ret;
+      }
+      if (en.getChildren() != null) {
+        for (ExprNodeDesc cn : en.getChildren()) {
+          if (!exprContainsOnlyPartitionColOrVirtualColOrConstants(cn)) {
+            exprNodeToPartOrVirtualColOrConstExpr.put(en, false);
+            return false;
+          }
+        }
+      }
+      exprNodeToPartOrVirtualColOrConstExpr.put(en, true);
+      return true;
+    }
+
+
+    /**
+     * Check if the expression node satisfies the following :
+     * Has at least one subexpression containing a partition/virtual column and
+     * referring to exactly one table alias.
+     * @param en Expression Node Descriptor
+     * @return true if there is at least one subexpression with a partition/virtual column
+     * that refers to exactly one table alias. If not, return false.
+     */
+    private boolean hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) {
+      if (en == null || en.getChildren() == null) {
+        return false;
+      }
+      for (ExprNodeDesc cn : en.getChildren()) {
+        if (exprContainsOnlyPartitionColOrVirtualColOrConstants(cn) && getTableAlias(cn) != null) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+
+    /**
+     * Check if the expression node satisfies the following :
+     * All subexpressions contain constants or partition/virtual columns coming from the
+     * same table.
+     * @param en Expression Node Descriptor
+     * @return true/false based on the condition specified in the above description.
+     */
+    private boolean hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) {
+      if (!exprContainsOnlyPartitionColOrVirtualColOrConstants(en)) {
+        return false;
+      }
+
+      Set<String> s = new HashSet<String>();
+      Set<ExprNodeDesc> visited = new HashSet<ExprNodeDesc>();
+
+      return getTableAliasHelper(en, s, visited);
+    }
+
+
+    /**
+     * Return the expression node descriptor if the input expression node is a GenericUDFIn.
+     * Else, return null.
+     * @param en Expression Node Descriptor
+     * @return The expression node descriptor if the input expression node represents an IN clause.
+     * Else, return null.
+     */
+    private ExprNodeGenericFuncDesc getInExprNode(ExprNodeDesc en) {
+      if (en == null) {
+        return null;
+      }
+
+      if (en instanceof ExprNodeGenericFuncDesc && ((ExprNodeGenericFuncDesc)(en)).getGenericUDF()
+          instanceof GenericUDFIn) {
+        return (ExprNodeGenericFuncDesc) en;
+      }
+      return null;
+    }
+
+
+    /**
+     * Helper used by getTableAlias
+     * @param en Expression Node Descriptor
+     * @param s Set of the table Aliases associated with the current Expression node.
+     * @param visited Visited ExpressionNode set.
+     * @return true if en has at most one table associated with it, else return false.
+     */
+    private boolean getTableAliasHelper(ExprNodeDesc en, Set<String> s, Set<ExprNodeDesc> visited) {
+      visited.add(en);
+
+      // The current expression node is a column; see if its table alias is already part of
+      // the return set s. If it is not, and s already has an entry, the expression spans
+      // more than one table, so return false.
+      if (en instanceof ExprNodeColumnDesc) {
+        if (s.size() > 0 &&
+           !s.contains(((ExprNodeColumnDesc)en).getTabAlias())) {
+          return false;
+        }
+        if (s.size() == 0) {
+          s.add(((ExprNodeColumnDesc)en).getTabAlias());
+        }
+        return true;
+      }
+      if (en.getChildren() == null) {
+        return true;
+      }
+
+      // Iterate through the children in a DFS manner to see if there is more than 1 table alias
+      // referenced by the current expression node.
+      for (ExprNodeDesc cn : en.getChildren()) {
+        if (visited.contains(cn)) {
+          continue;
+        }
+        if (cn instanceof ExprNodeColumnDesc) {
+          s.add(((ExprNodeColumnDesc) cn).getTabAlias());
+        } else if (!(cn instanceof ExprNodeConstantDesc)) {
+          if (!getTableAliasHelper(cn, s, visited)) {
+            return false;
+          }
+        }
+      }
+      return true;
+    }
+
+
+    /**
+     * If the given expression has just a single table associated with it,
+     * return the table alias associated with it. Else, return null.
+     * @param en
+     * @return The table alias associated with the expression if there is a single table
+     * reference. Else, return null.
+     */
+    private String getTableAlias(ExprNodeDesc en) {
+      Set<String> s = new HashSet<String>();
+      Set<ExprNodeDesc> visited = new HashSet<ExprNodeDesc>();
+      boolean singleTableAlias = getTableAliasHelper(en, s, visited);
+
+      if (!singleTableAlias || s.size() == 0) {
+        return null;
+      }
+      StringBuilder ans = new StringBuilder();
+      for (String st : s) {
+        ans.append(st);
+      }
+      return ans.toString();
+    }
+
+
+    /**
+     * The main process method for StructInExprProcessor to generate additional predicates
+     * containing only partition columns.
+     */
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+      ExprNodeGenericFuncDesc fd = getInExprNode((ExprNodeDesc)nd);
+
+      /***************************************************************************************\
+       BEGIN : Early terminations for Partition Column Separator
+      /***************************************************************************************/
+      // 1. If the input node is not an IN operator, we bail out.
+      if (fd == null) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition columns not separated for " + fd + ", is not IN operator : ");
+        }
+        return null;
+      }
+
+      // 2. Check if the input is an IN operator with struct children
+      List<ExprNodeDesc> children = fd.getChildren();
+      if (!(children.get(0) instanceof ExprNodeGenericFuncDesc) ||
+          (!(((ExprNodeGenericFuncDesc) children.get(0)).getGenericUDF()
+           instanceof GenericUDFStruct))) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition columns not separated for " + fd + ", children size " +
+           children.size() + ", child expression : " + children.get(0).getExprString());
+        }
+        return null;
+      }
+
+      // 3. See if the IN (STRUCT(EXP1, EXP2,..)) has at least one expression with a partition
+      // column referring to a single table alias. If not, bail out.
+      // We might have expressions containing only partitioning columns, say, T1.A + T2.B
+      // where T1.A and T2.B are both partitioning columns.
+      // However, these expressions should not be considered as valid expressions for separation.
+      if (!hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition columns not separated for " + fd +
+            ", there are no expression containing partition columns in struct fields");
+        }
+        return null;
+      }
+
+      // 4. See if all the field expressions of the left hand side of IN are expressions 
+      // containing constants or only partition columns coming from the same table.
+      // If so, we need not perform this optimization and we should bail out.
+      if (hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(children.get(0))) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition columns not separated for " + fd +
+          ", all fields are expressions containing constants or only partition columns"
+          + "coming from same table");
+        }
+        return null;
+      }
+
+      /***************************************************************************************\
+       END : Early terminations for Partition Column Separator
+      /***************************************************************************************/
+
+
+      /***************************************************************************************\
+       BEGIN : Actual processing of the IN (STRUCT(..)) expression.
+      /***************************************************************************************/
+      Map<String, TableInfo> tableAliasToInfo =
+        new HashMap<>();
+      ExprNodeGenericFuncDesc originalStructDesc = ((ExprNodeGenericFuncDesc) children.get(0));
+      List<ExprNodeDesc> originalDescChildren = originalStructDesc.getChildren();
+      /**
+       * PASS 1 : Iterate through the original IN(STRUCT(..)) and populate the tableAlias to
+       * predicate information inside tableAliasToInfo.
+       */
+      for (int i = 0; i < originalDescChildren.size(); i++) {
+        ExprNodeDesc en =  originalDescChildren.get(i);
+        String tabAlias = null;
+
+        // If the current expression node does not have a virtual/partition column or
+        // single table alias reference, ignore it and move to the next expression node.
+        if (!exprContainsOnlyPartitionColOrVirtualColOrConstants(en) ||
+            (tabAlias = getTableAlias(en)) == null) {
+          continue;
+        }
+
+        TableInfo currTableInfo = null;
+
+        // If the table alias to information map already contains the current table,
+        // use the existing TableInfo object. Else, create a new one.
+        if (tableAliasToInfo.containsKey(tabAlias)) {
+          currTableInfo = tableAliasToInfo.get(tabAlias);
+        } else {
+          currTableInfo = new TableInfo();
+        }
+        currTableInfo.exprNodeLHSDescriptor.add(en);
+
+        // Iterate through the child nodes of the IN clause starting from index 1,
+        // which corresponds to the right hand side of the IN list.
+        // Insert the value corresponding to the current expression into currTableInfo.exprNodeRHSStructs.
+        for (int j = 1; j < children.size(); j++) {
+          ExprNodeDesc currChildStructExpr = children.get(j);
+          ExprNodeDesc newConstStructElement = null;
+
+          // 1. Get the constant value associated with the current element in the struct.
+          // If the current child struct expression is a constant struct.
+          if (currChildStructExpr instanceof ExprNodeConstantDesc) {
+            List<Object> cnCols = (List<Object>)(((ExprNodeConstantDesc) (children.get(j))).getValue());
+            newConstStructElement = new ExprNodeConstantDesc(cnCols.get(i));
+          } else {
+            // This better be a generic struct with constant values as the children.
+            List<ExprNodeDesc> cnChildren = ((ExprNodeGenericFuncDesc) children.get(j)).getChildren();
+            newConstStructElement = new ExprNodeConstantDesc(
+              (((ExprNodeConstantDesc) (cnChildren.get(i))).getValue()));
+          }
+
+          // 2. Insert the current constant value into exprNodeStructs list.
+          // If there is no struct corresponding to the current element, create a new one, insert
+          // the constant value into it and add the struct as part of exprNodeStructs.
+          if (currTableInfo.exprNodeRHSStructs.size() < j) {
+            List<ExprNodeDesc> newConstStructList = new ArrayList<ExprNodeDesc>();
+            newConstStructList.add(newConstStructElement);
+            currTableInfo.exprNodeRHSStructs.add(newConstStructList);
+          } else {
+            // We already have a struct node for the current index. Insert the constant value
+            // into the corresponding struct node.
+            currTableInfo.exprNodeRHSStructs.get(j-1).add(newConstStructElement);
+          }
+        }
+
+        // Insert the current table alias entry into the map if not already present in tableAliasToInfo.
+        if (!tableAliasToInfo.containsKey(tabAlias)) {
+          tableAliasToInfo.put(tabAlias, currTableInfo);
+        }
+      }
+
+      /**
+       * PASS 2 : Iterate through the tableAliasToInfo populated via PASS 1
+       * to generate the new expression.
+       */
+      // subExpr is the list containing generated IN clauses as a result of this optimization.
+      final List<ExprNodeDesc> subExpr =
+        new ArrayList<ExprNodeDesc>(originalDescChildren.size()+1);
+
+      for (Entry<String, TableInfo> entry :
+        tableAliasToInfo.entrySet()) {
+        TableInfo currTableInfo = entry.getValue();
+        List<List<ExprNodeDesc>> currConstStructList = currTableInfo.exprNodeRHSStructs;
+
+        // IN(STRUCT(..)..) ExprNodeDesc list for the current table alias.
+        List<ExprNodeDesc> currInStructExprList = new ArrayList<ExprNodeDesc>();
+
+        // Add the left hand side of the IN clause which contains the struct definition.
+        currInStructExprList.add(ExprNodeGenericFuncDesc.newInstance
+          (FunctionRegistry.getFunctionInfo(STRUCT_UDF).getGenericUDF(),
+          STRUCT_UDF,
+          currTableInfo.exprNodeLHSDescriptor));
+
+        // Generate the right hand side of the IN clause
+        for (int i = 0; i < currConstStructList.size(); i++) {
+          List<ExprNodeDesc> currConstStruct = currConstStructList.get(i);
+
+          // Add the current constant struct to the right hand side of the IN clause.
+          currInStructExprList.add(ExprNodeGenericFuncDesc.newInstance
+            (FunctionRegistry.getFunctionInfo(STRUCT_UDF).getGenericUDF(),
+            STRUCT_UDF,
+            currConstStruct));
+        }
+
+        // Add the newly generated IN clause to subExpr.
+        subExpr.add(new ExprNodeGenericFuncDesc(
+          TypeInfoFactory.booleanTypeInfo, FunctionRegistry.
+          getFunctionInfo(IN_UDF).getGenericUDF(), currInStructExprList));
+      }
+      /***************************************************************************************\
+       END : Actual processing of the IN (STRUCT(..)) expression.
+      /***************************************************************************************/
+
+      // If there is only one table alias, return its single generated IN clause
+      if (subExpr.size() == 1) {
+        // Return the new expression containing only partition columns
+        return subExpr.get(0);
+      }
+      // Return the new expression containing only partition columns
+      // after concatenating them with AND operator
+      return new ExprNodeGenericFuncDesc(
+        TypeInfoFactory.booleanTypeInfo,
+        FunctionRegistry.getFunctionInfo(AND_UDF).getGenericUDF(), subExpr);
+    }
+  }
+}
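
To make the PASS 1 / PASS 2 flow above concrete, here is a self-contained
sketch of the per-table grouping over plain strings rather than ExprNodeDesc
trees. It deliberately omits the eligibility checks (partition/virtual columns
only, constant RHS structs); all names are illustrative, not Hive API:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch of the separation performed by PartitionColumnsSeparator:
    // STRUCT(T1.a, T1.b, T2.b) IN (STRUCT(1,2,3), STRUCT(2,3,4)) becomes
    // STRUCT(T1.a, T1.b) IN (STRUCT(1,2), STRUCT(2,3)) AND STRUCT(T2.b) IN (STRUCT(3), STRUCT(4))
    public final class StructInSeparatorSketch {

      // tableOf[i]/colOf[i] describe field i of the LHS struct; each row of
      // tuples is one constant struct on the RHS. Returns one IN clause per table.
      public static List<String> separate(String[] tableOf, String[] colOf, Object[][] tuples) {
        // PASS 1: group LHS field positions by table alias.
        Map<String, List<Integer>> byTable = new LinkedHashMap<>();
        for (int i = 0; i < colOf.length; i++) {
          byTable.computeIfAbsent(tableOf[i], k -> new ArrayList<>()).add(i);
        }
        // PASS 2: for each table, slice every constant tuple down to that table's positions.
        List<String> clauses = new ArrayList<>();
        for (Map.Entry<String, List<Integer>> e : byTable.entrySet()) {
          List<Integer> pos = e.getValue();
          StringBuilder in = new StringBuilder("STRUCT(");
          for (int k = 0; k < pos.size(); k++) {
            in.append(k > 0 ? ", " : "").append(e.getKey()).append('.').append(colOf[pos.get(k)]);
          }
          in.append(") IN (");
          for (int r = 0; r < tuples.length; r++) {
            in.append(r > 0 ? ", " : "").append("STRUCT(");
            for (int k = 0; k < pos.size(); k++) {
              in.append(k > 0 ? ", " : "").append(tuples[r][pos.get(k)]);
            }
            in.append(')');
          }
          clauses.add(in.append(')').toString());
        }
        return clauses;
      }

      public static void main(String[] args) {
        System.out.println(separate(
            new String[] {"T1", "T1", "T2"},
            new String[] {"a", "b", "b"},
            new Object[][] {{1, 2, 3}, {2, 3, 4}}));
      }
    }

The real optimizer then ANDs the generated clauses back onto the original
predicate, so correctness never depends on the new clauses alone.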

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
index 4799b4d..a1a49cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
@@ -18,14 +18,10 @@
 package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.Stack;
 
 import org.apache.calcite.util.Pair;
@@ -50,18 +46,15 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc.ExprNodeDescEqualityWrapper;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableSortedSet;
 import com.google.common.collect.ListMultimap;
 
 /**
@@ -78,48 +71,14 @@ public class PointLookupOptimizer implements Transform {
           GenericUDFIn.class.getAnnotation(Description.class).name();
   private static final String STRUCT_UDF =
           GenericUDFStruct.class.getAnnotation(Description.class).name();
-  private static final String AND_UDF =
-      GenericUDFOPAnd.class.getAnnotation(Description.class).name();
-
   // these are closure-bound for all the walkers in context
   public final int minOrExpr;
-  public final boolean extract;
-  public final boolean testMode;
 
   /*
    * Pass in configs and pre-create a parse context
    */
-  public PointLookupOptimizer(final int min, final boolean extract, final boolean testMode) {
+  public PointLookupOptimizer(final int min) {
     this.minOrExpr = min;
-    this.extract = extract;
-    this.testMode = testMode;
-  }
-
-  // Hash Set iteration isn't ordered, but force string sorted order
-  // to get a consistent test run.
-  private Collection<ExprNodeDescEqualityWrapper> sortForTests(
-      Set<ExprNodeDescEqualityWrapper> valuesExpr) {
-    if (!testMode) {
-      // normal case - sorting is wasted for an IN()
-      return valuesExpr;
-    }
-    final Collection<ExprNodeDescEqualityWrapper> sortedValues;
-
-    sortedValues = ImmutableSortedSet.copyOf(
-        new Comparator<ExprNodeDescEqualityWrapper>() {
-          @Override
-          public int compare(ExprNodeDescEqualityWrapper w1,
-              ExprNodeDescEqualityWrapper w2) {
-            // fail if you find nulls (this is a test-code section)
-            if (w1.equals(w2)) {
-              return 0;
-            }
-            return w1.getExprNodeDesc().getExprString()
-                .compareTo(w2.getExprNodeDesc().getExprString());
-          }
-        }, valuesExpr);
-
-    return sortedValues;
   }
 
   @Override
@@ -152,9 +111,6 @@ public class PointLookupOptimizer implements Transform {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Generated new predicate with IN clause: " + newPredicate);
         }
-        if (!extract) {
-          filterOp.getConf().setOrigPredicate(predicate);
-        }
         filterOp.getConf().setPredicate(newPredicate);
       }
 
@@ -326,50 +282,6 @@ public class PointLookupOptimizer implements Transform {
       newPredicate = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
               FunctionRegistry.getFunctionInfo(IN_UDF).getGenericUDF(), newChildren);
 
-      if (extract && columns.size() > 1) {
-        final List<ExprNodeDesc> subExpr = new ArrayList<ExprNodeDesc>(columns.size()+1);
-
-        // extract pre-conditions for the tuple expressions
-        // (a,b) IN ((1,2),(2,3)) ->
-        //          ((a) IN (1,2) and b in (2,3)) and (a,b) IN ((1,2),(2,3))
-
-        for (String keyString : columnConstantsMap.keySet()) {
-          final Set<ExprNodeDescEqualityWrapper> valuesExpr = 
-              new HashSet<ExprNodeDescEqualityWrapper>(children.size());
-          final List<Pair<ExprNodeColumnDesc, ExprNodeConstantDesc>> partial = 
-              columnConstantsMap.get(keyString);
-          for (int i = 0; i < children.size(); i++) {
-            Pair<ExprNodeColumnDesc, ExprNodeConstantDesc> columnConstant = partial
-                .get(i);
-            valuesExpr
-                .add(new ExprNodeDescEqualityWrapper(columnConstant.right));
-          }
-          ExprNodeColumnDesc lookupCol = partial.get(0).left;
-          // generate a partial IN clause, if the column is a partition column
-          if (lookupCol.getIsPartitionColOrVirtualCol()
-              || valuesExpr.size() < children.size()) {
-            // optimize only nDV reductions
-            final List<ExprNodeDesc> inExpr = new ArrayList<ExprNodeDesc>();
-            inExpr.add(lookupCol);
-            for (ExprNodeDescEqualityWrapper value : sortForTests(valuesExpr)) {
-              inExpr.add(value.getExprNodeDesc());
-            }
-            subExpr.add(new ExprNodeGenericFuncDesc(
-                TypeInfoFactory.booleanTypeInfo, FunctionRegistry
-                    .getFunctionInfo(IN_UDF).getGenericUDF(), inExpr));
-          }
-        }
-        // loop complete, inspect the sub expressions generated
-        if (subExpr.size() > 0) {
-          // add the newPredicate to the end & produce an AND clause
-          subExpr.add(newPredicate);
-          newPredicate = new ExprNodeGenericFuncDesc(
-              TypeInfoFactory.booleanTypeInfo, FunctionRegistry
-                  .getFunctionInfo(AND_UDF).getGenericUDF(), subExpr);
-        }
-        // else, newPredicate is unmodified
-      }
-
       return newPredicate;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
index 7cdc730..2ab1575 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
@@ -48,9 +48,12 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 /**
@@ -364,6 +367,36 @@ public final class PcrExprProcFactory {
           return getResultWrapFromResults(results, fd, newNodeOutputs);
         }
         return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, newNodeOutputs));
+      } else if (fd.getGenericUDF() instanceof GenericUDFIn) {
+          List<ExprNodeDesc> children = fd.getChildren();
+          boolean removePredElem = false;
+          ExprNodeDesc lhs = children.get(0);
+
+          if (lhs instanceof ExprNodeGenericFuncDesc) {
+              // Make sure that the generic udf is deterministic
+              if (FunctionRegistry.isDeterministic(((ExprNodeGenericFuncDesc) lhs)
+                  .getGenericUDF())) {
+                boolean hasOnlyPartCols = true;
+                for (ExprNodeDesc ed : ((ExprNodeGenericFuncDesc) lhs).getChildren()) {
+                    // Check if the current field expression contains only
+                    // partition column or a virtual column or constants.
+                    // If yes, this filter predicate is a candidate for this optimization.
+                    if (!(ed instanceof ExprNodeColumnDesc &&
+                         ((ExprNodeColumnDesc)ed).getIsPartitionColOrVirtualCol())) {
+                      hasOnlyPartCols = false;
+                      break;
+                    }
+                 }
+                 removePredElem = hasOnlyPartCols;
+              }
+          }
+
+          // If removePredElem is true, return a TRUE wrapper, as this is a potential candidate
+          // for the partition condition remover. Else, set the WalkState for this node to UNKNOWN.
+          return removePredElem ?
+            new NodeInfoWrapper(WalkState.TRUE, null,
+            new ExprNodeConstantDesc(fd.getTypeInfo(), Boolean.TRUE)) :
+            new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, nodeOutputs));
       } else if (!FunctionRegistry.isDeterministic(fd.getGenericUDF())) {
         // If it's a non-deterministic UDF, set unknown to true
         return new NodeInfoWrapper(WalkState.UNKNOWN, null,
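
The new GenericUDFIn branch above reduces to a small eligibility test: the IN
predicate may be marked TRUE for the partition condition remover only when the
struct on its left hand side is deterministic and every field is a partition or
virtual column. The gist as a stand-alone sketch (illustrative only, not Hive
API):

    // Sketch of the check added to PcrExprProcFactory for IN predicates.
    public final class PcrInCheckSketch {
      static boolean removableByPcr(boolean deterministicUdf, boolean[] fieldIsPartOrVirtualCol) {
        if (!deterministicUdf) {
          return false;              // non-deterministic functions must stay in the plan
        }
        for (boolean isPartCol : fieldIsPartOrVirtualCol) {
          if (!isPartCol) {
            return false;            // any non-partition field keeps the filter
          }
        }
        return true;                 // the partition pruner fully covers this predicate
      }
    }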

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
index 7262164..fd51628 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
@@ -55,8 +55,7 @@ public final class OpProcFactory extends PrunerOperatorFactory {
         TableScanOperator top) throws SemanticException, UDFArgumentException {
       OpWalkerCtx owc = (OpWalkerCtx) procCtx;
       // Otherwise this is not a sampling predicate and we need to
-      ExprNodeDesc predicate = fop.getConf().getOrigPredicate();
-      predicate = predicate == null ? fop.getConf().getPredicate() : predicate;
+      ExprNodeDesc predicate = fop.getConf().getPredicate();
       String alias = top.getConf().getAlias();
 
       // Generate the partition pruning predicate

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
index 6a31689..ccc4bb4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
@@ -79,7 +79,6 @@ public class FilterDesc extends AbstractOperatorDesc {
 
   private static final long serialVersionUID = 1L;
   private org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate;
-  private transient ExprNodeDesc origPredicate;
   private boolean isSamplingPred;
   private transient SampleDesc sampleDescr;
   //Is this a filter that should perform a comparison for sorted searches
@@ -151,14 +150,6 @@ public class FilterDesc extends AbstractOperatorDesc {
     this.isSortedFilter = isSortedFilter;
   }
 
-  public void setOrigPredicate(ExprNodeDesc origPredicate) {
-    this.origPredicate = origPredicate;
-  }
-
-  public ExprNodeDesc getOrigPredicate() {
-    return origPredicate;
-  }
-
   /**
    * Some filters are generated or implied, which means it is not in the query.
    * It is added by the analyzer. For example, when we do an inner join, we add

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pcs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pcs.q b/ql/src/test/queries/clientpositive/pcs.q
new file mode 100644
index 0000000..4b35a4d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/pcs.q
@@ -0,0 +1,66 @@
+drop table pcs_t1; 
+drop table pcs_t2; 
+
+create table pcs_t1 (key int, value string) partitioned by (ds string); 
+insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key; 
+insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key; 
+
+analyze table pcs_t1 partition(ds) compute statistics;
+analyze table pcs_t1 partition(ds) compute statistics for columns;
+
+set hive.optimize.point.lookup = true;
+set hive.optimize.point.lookup.min = 1;
+
+explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds;
+select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds;
+
+set hive.optimize.point.lookup = false;
+set hive.optimize.partition.columns.separate=true;
+set hive.optimize.ppd=true;
+
+explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
+select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
+
+explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4));
+select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4));
+
+explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'));
+
+select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'));
+
+explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2));
+select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2));
+
+explain select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
+select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
+
+EXPLAIN EXTENDED
+SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds;
+
+SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds;
+
+explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11));
+select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11));
+
+explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3));
+
+explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3));
+select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3));
+
+explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0);
+select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pointlookup.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pointlookup.q b/ql/src/test/queries/clientpositive/pointlookup.q
index 1aef2ef..c460f39 100644
--- a/ql/src/test/queries/clientpositive/pointlookup.q
+++ b/ql/src/test/queries/clientpositive/pointlookup.q
@@ -18,8 +18,7 @@ WHERE
 
 
 set hive.optimize.point.lookup.min=3;
-set hive.optimize.point.lookup.extract=false;
-
+set hive.optimize.partition.columns.separate=false;
 explain
 SELECT key
 FROM src
@@ -38,8 +37,7 @@ WHERE
    AND value = '3'))
 ;
 
-set hive.optimize.point.lookup.extract=true;
-
+set hive.optimize.partition.columns.separate=true;
 explain
 SELECT key
 FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pointlookup2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pointlookup2.q b/ql/src/test/queries/clientpositive/pointlookup2.q
index 31bebbb..94e99fb 100644
--- a/ql/src/test/queries/clientpositive/pointlookup2.q
+++ b/ql/src/test/queries/clientpositive/pointlookup2.q
@@ -14,7 +14,7 @@ from pcr_t1
 insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08' and key=2;
 
 set hive.optimize.point.lookup.min=2;
-set hive.optimize.point.lookup.extract=true;
+set hive.optimize.partition.columns.separate=true;
 
 explain extended
 select key, value, ds

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pointlookup3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pointlookup3.q b/ql/src/test/queries/clientpositive/pointlookup3.q
index 3daa94b..79e7348 100644
--- a/ql/src/test/queries/clientpositive/pointlookup3.q
+++ b/ql/src/test/queries/clientpositive/pointlookup3.q
@@ -6,7 +6,7 @@ insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') sel
 insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
 
 set hive.optimize.point.lookup.min=2;
-set hive.optimize.point.lookup.extract=true;
+set hive.optimize.partition.columns.separate=true;
 
 explain extended
 select key, value, ds1, ds2

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
index eca29df..ddb05e2 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
@@ -153,7 +153,7 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (key = 'foo') (type: boolean)
+              predicate: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
               Select Operator
                 expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string)
                 outputColumnNames: _col0, _col3
@@ -390,7 +390,7 @@ STAGE PLANS:
           TableScan
             alias: acid
             Filter Operator
-              predicate: (key = 'foo') (type: boolean)
+              predicate: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
               Select Operator
                 expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string)
                 outputColumnNames: _col0, _col3

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index 4320f01..7b428bc 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -1275,21 +1275,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
             Execution mode: llap
@@ -4076,21 +4061,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
             Execution mode: uber
@@ -5229,21 +5199,6 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                           Target column: ds
                           Target Vertex: Map 1
-                    Select Operator
-                      expressions: UDFToDouble(hr) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart_orc
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
             Execution mode: uber

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index e30465d..e9192a3 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -1275,21 +1275,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
             Execution mode: llap
@@ -4076,21 +4061,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
             Execution mode: vectorized, uber
@@ -5229,21 +5199,6 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                           Target column: ds
                           Target Vertex: Map 1
-                    Select Operator
-                      expressions: UDFToDouble(hr) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart_orc
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
             Execution mode: uber


[34/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by jx...@apache.org.
HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3228ba7c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3228ba7c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3228ba7c

Branch: refs/heads/master-fixed
Commit: 3228ba7c13ced90f8e845ea8f3ca1a46737ec1fe
Parents: fe6ebf7
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Tue Nov 3 19:41:17 2015 -0800
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Tue Nov 3 19:41:17 2015 -0800

----------------------------------------------------------------------
 .../hive/ql/exec/vector/VectorSerializeRow.java |   4 +-
 .../expressions/FilterStructColumnInList.java   |   3 +-
 .../vector/expressions/StructColumnInList.java  |   3 +-
 .../hive/ql/exec/vector/TestVectorSerDeRow.java |  19 +-
 .../test/results/clientpositive/acid_join.q.out |   2 +-
 .../alter_partition_change_col.q.out            | 240 ++---
 .../clientpositive/alter_table_cascade.q.out    |  40 +-
 .../clientpositive/ansi_sql_arithmetic.q.out    |   2 +-
 .../results/clientpositive/avro_decimal.q.out   |  10 +-
 .../clientpositive/avro_decimal_native.q.out    |  10 +-
 .../clientpositive/cast_qualified_types.q.out   |   2 +-
 .../results/clientpositive/decimal_1_1.q.out    |  48 +-
 .../test/results/clientpositive/decimal_3.q.out | 514 +++++-----
 .../test/results/clientpositive/decimal_4.q.out | 144 +--
 .../test/results/clientpositive/decimal_5.q.out | 180 ++--
 .../test/results/clientpositive/decimal_6.q.out |  92 +-
 .../results/clientpositive/decimal_join2.q.out  | 260 ++---
 .../clientpositive/decimal_precision.q.out      | 170 ++--
 .../clientpositive/decimal_trailing.q.out       |  42 +-
 .../results/clientpositive/decimal_udf.q.out    | 960 +++++++++----------
 .../insert_nonacid_from_acid.q.out              |  20 +-
 .../llap/hybridgrace_hashjoin_1.q.out           | 204 ++--
 .../clientpositive/llap/mapjoin_decimal.q.out   | 424 ++++----
 .../results/clientpositive/orc_file_dump.q.out  |   6 +-
 .../clientpositive/orc_predicate_pushdown.q.out |   4 +-
 .../clientpositive/parquet_decimal.q.out        |  16 +-
 .../clientpositive/parquet_ppd_boolean.q.out    | 180 ++--
 .../clientpositive/parquet_ppd_char.q.out       | 220 ++---
 .../clientpositive/parquet_ppd_date.q.out       | 330 +++----
 .../clientpositive/parquet_ppd_decimal.q.out    | 660 ++++++-------
 .../clientpositive/parquet_ppd_timestamp.q.out  | 320 +++----
 .../clientpositive/parquet_ppd_varchar.q.out    | 220 ++---
 .../parquet_predicate_pushdown.q.out            |   4 +-
 .../results/clientpositive/serde_regex.q.out    |  74 +-
 .../spark/avro_decimal_native.q.out             |  10 +-
 .../clientpositive/spark/decimal_1_1.q.out      |  48 +-
 .../clientpositive/spark/mapjoin_decimal.q.out  | 424 ++++----
 .../spark/vector_between_in.q.out               |  14 +-
 .../spark/vector_cast_constant.q.java1.7.out    |  20 +-
 .../spark/vector_data_types.q.out               |   4 +-
 .../spark/vector_decimal_aggregate.q.out        |  32 +-
 .../spark/vector_decimal_mapjoin.q.out          | 212 ++--
 .../clientpositive/sum_expr_with_order.q.out    |   2 +-
 .../tez/hybridgrace_hashjoin_1.q.out            | 204 ++--
 .../clientpositive/tez/mapjoin_decimal.q.out    | 424 ++++----
 .../clientpositive/tez/update_all_types.q.out   |  30 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |   2 +-
 .../clientpositive/tez/vector_between_in.q.out  |  14 +-
 .../tez/vector_cast_constant.q.java1.7.out      |  20 +-
 .../clientpositive/tez/vector_data_types.q.out  |   4 +-
 .../clientpositive/tez/vector_decimal_2.q.out   |   4 +-
 .../clientpositive/tez/vector_decimal_3.q.out   | 514 +++++-----
 .../clientpositive/tez/vector_decimal_4.q.out   | 288 +++---
 .../clientpositive/tez/vector_decimal_5.q.out   | 180 ++--
 .../clientpositive/tez/vector_decimal_6.q.out   | 172 ++--
 .../tez/vector_decimal_aggregate.q.out          |  32 +-
 .../tez/vector_decimal_cast.q.out               |  20 +-
 .../tez/vector_decimal_expressions.q.out        |  20 +-
 .../tez/vector_decimal_mapjoin.q.out            | 212 ++--
 .../tez/vector_decimal_precision.q.out          | 170 ++--
 .../tez/vector_decimal_round_2.q.out            |  14 +-
 .../tez/vector_decimal_trailing.q.out           |  42 +-
 .../clientpositive/tez/vector_decimal_udf.q.out | 960 +++++++++----------
 .../tez/vector_reduce_groupby_decimal.q.out     |  98 +-
 .../clientpositive/update_all_types.q.out       |  30 +-
 .../clientpositive/vector_aggregate_9.q.out     |   2 +-
 .../clientpositive/vector_between_in.q.out      |  14 +-
 .../vector_cast_constant.q.java1.7.out          |  20 +-
 .../clientpositive/vector_data_types.q.out      |   4 +-
 .../clientpositive/vector_decimal_2.q.out       |   4 +-
 .../clientpositive/vector_decimal_3.q.out       | 514 +++++-----
 .../clientpositive/vector_decimal_4.q.out       | 288 +++---
 .../clientpositive/vector_decimal_5.q.out       | 180 ++--
 .../clientpositive/vector_decimal_6.q.out       | 172 ++--
 .../vector_decimal_aggregate.q.out              |  32 +-
 .../clientpositive/vector_decimal_cast.q.out    |  20 +-
 .../vector_decimal_expressions.q.out            |  20 +-
 .../clientpositive/vector_decimal_mapjoin.q.out | 212 ++--
 .../vector_decimal_precision.q.out              | 170 ++--
 .../clientpositive/vector_decimal_round_2.q.out |  14 +-
 .../vector_decimal_trailing.q.out               |  42 +-
 .../clientpositive/vector_decimal_udf.q.out     | 960 +++++++++----------
 .../vector_reduce_groupby_decimal.q.out         |  98 +-
 .../clientpositive/windowing_decimal.q.out      | 104 +-
 .../clientpositive/windowing_navfn.q.out        |  20 +-
 .../results/clientpositive/windowing_rank.q.out |  60 +-
 .../clientpositive/windowing_windowspec3.q.out  |  18 +-
 .../fast/BinarySortableSerializeWrite.java      |   2 +-
 .../hadoop/hive/serde2/fast/SerializeWrite.java |   2 +-
 .../hive/serde2/lazy/LazyHiveDecimal.java       |   4 +-
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |   3 +-
 .../lazy/fast/LazySimpleSerializeWrite.java     |  14 +-
 .../fast/LazyBinarySerializeWrite.java          |   2 +-
 .../apache/hadoop/hive/serde2/VerifyFast.java   |   9 +-
 .../binarysortable/TestBinarySortableFast.java  |   3 +-
 .../hive/serde2/lazy/TestLazySimpleFast.java    |   3 +-
 .../serde2/lazybinary/TestLazyBinaryFast.java   |   3 +-
 .../hadoop/hive/common/type/HiveDecimal.java    |  11 +
 .../ql/exec/vector/DecimalColumnVector.java     |   2 -
 99 files changed, 6331 insertions(+), 6342 deletions(-)
----------------------------------------------------------------------
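A note for readers skimming the golden-file churn below: the behavioral change is that decimal values are now rendered at the declared scale of their column instead of having trailing zeros trimmed, which is why, for example, 3.14 becomes 3.140000000000000000 under a decimal(38,18) column. A minimal, JDK-only sketch of the difference — java.math.BigDecimal stands in purely for illustration and is not Hive's actual HiveDecimal code path:

    import java.math.BigDecimal;

    public class PadToScaleDemo {
        public static void main(String[] args) {
            BigDecimal v = new BigDecimal("3.14");
            // Old rendering: no padding, so the scale of the value itself wins.
            System.out.println(v.toPlainString());               // 3.14
            // New rendering: pad out to the declared column scale, e.g. decimal(38,18).
            System.out.println(v.setScale(18).toPlainString());  // 3.140000000000000000
        }
    }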


http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
index fe889b5..c98c260 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSerializeRow.java
@@ -491,7 +491,7 @@ public final class VectorSerializeRow<T extends SerializeWrite> {
 
       if (colVector.isRepeating) {
         if (colVector.noNulls || !colVector.isNull[0]) {
-          serializeWrite.writeHiveDecimal(colVector.vector[0].getHiveDecimal());
+          serializeWrite.writeHiveDecimal(colVector.vector[0].getHiveDecimal(), colVector.scale);
           return true;
         } else {
           serializeWrite.writeNull();
@@ -499,7 +499,7 @@ public final class VectorSerializeRow<T extends SerializeWrite> {
         }
       } else {
         if (colVector.noNulls || !colVector.isNull[batchIndex]) {
-          serializeWrite.writeHiveDecimal(colVector.vector[batchIndex].getHiveDecimal());
+          serializeWrite.writeHiveDecimal(colVector.vector[batchIndex].getHiveDecimal(), colVector.scale);
           return true;
         } else {
           serializeWrite.writeNull();
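
A sketch of the serialization pattern the hunks above change (the same scale-threading appears again in the two StructColumnInList diffs that follow): pick slot 0 when the vector is repeating, honor the null mask, and pass the column's scale along with the value so the writer can pad it. The stub types below stand in for Hive's DecimalColumnVector and SerializeWrite only so the example is self-contained; the names mirror the diff, but this is an illustration, not the real classes.

    import java.math.BigDecimal;

    public class RepeatingVectorSketch {
        // Stand-ins declared here only to make the sketch compile; the real
        // classes live in org.apache.hadoop.hive.ql.exec.vector and
        // org.apache.hadoop.hive.serde2.fast.
        static class DecimalColumnVector {
            boolean isRepeating;   // true when every row in the batch holds the same value
            boolean noNulls;
            boolean[] isNull;
            BigDecimal[] vector;   // the real vector holds HiveDecimalWritable
            int scale;             // declared column scale, e.g. 4 for decimal(14,4)
        }

        interface SerializeWrite {
            void writeHiveDecimal(BigDecimal d, int scale);
            void writeNull();
        }

        // Mirrors the control flow of the hunks above.
        static boolean serializeDecimal(DecimalColumnVector col, SerializeWrite out, int batchIndex) {
            int idx = col.isRepeating ? 0 : batchIndex;
            if (col.noNulls || !col.isNull[idx]) {
                out.writeHiveDecimal(col.vector[idx], col.scale); // scale now travels with the value
                return true;
            }
            out.writeNull();
            return false;
        }

        public static void main(String[] args) {
            DecimalColumnVector col = new DecimalColumnVector();
            col.isRepeating = true;
            col.noNulls = true;
            col.vector = new BigDecimal[] { new BigDecimal("3.14") };
            col.scale = 4;
            SerializeWrite out = new SerializeWrite() {
                public void writeHiveDecimal(BigDecimal d, int scale) {
                    System.out.println(d.setScale(scale).toPlainString()); // prints 3.1400
                }
                public void writeNull() { System.out.println("NULL"); }
            };
            serializeDecimal(col, out, 7); // batchIndex is ignored for a repeating vector
        }
    }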

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
index 00f22bb..70b393c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
@@ -110,8 +110,9 @@ public class FilterStructColumnInList extends FilterStringColumnInList implement
             break;
 
           case DECIMAL:
+            DecimalColumnVector decColVector = ((DecimalColumnVector) colVec);
             binarySortableSerializeWrite.writeHiveDecimal(
-                ((DecimalColumnVector) colVec).vector[adjustedIndex].getHiveDecimal());
+                decColVector.vector[adjustedIndex].getHiveDecimal(), decColVector.scale);
             break;
 
           default:

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
index 724497a..769c70a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
@@ -111,8 +111,9 @@ public class StructColumnInList extends StringColumnInList implements IStructInE
             break;
 
           case DECIMAL:
+            DecimalColumnVector decColVector = ((DecimalColumnVector) colVec);
             binarySortableSerializeWrite.writeHiveDecimal(
-                ((DecimalColumnVector) colVec).vector[adjustedIndex].getHiveDecimal());
+                decColVector.vector[adjustedIndex].getHiveDecimal(), decColVector.scale);
             break;
 
           default:

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
index 23e44f0..eaff732 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSerDeRow.java
@@ -57,21 +57,10 @@ import org.apache.hadoop.hive.serde2.lazy.fast.LazySimpleSerializeWrite;
 import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinaryDeserializeRead;
 import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinarySerializeWrite;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDateObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDoubleObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableFloatObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableIntObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableShortObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.hive.serde2.fast.SerializeWrite;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.BytesWritable;
@@ -415,10 +404,10 @@ public class TestVectorSerDeRow extends TestCase {
   private Output serializeRow(Object[] row, RandomRowObjectSource source, SerializeWrite serializeWrite) throws HiveException, IOException {
     Output output = new Output();
     serializeWrite.set(output);
-    PrimitiveCategory[] primitiveCategories = source.primitiveCategories();
-    for (int i = 0; i < primitiveCategories.length; i++) {
+    PrimitiveTypeInfo[] primitiveTypeInfos = source.primitiveTypeInfos();
+    for (int i = 0; i < primitiveTypeInfos.length; i++) {
       Object object = row[i];
-      PrimitiveCategory primitiveCategory = primitiveCategories[i];
+      PrimitiveCategory primitiveCategory = primitiveTypeInfos[i].getPrimitiveCategory();
       switch (primitiveCategory) {
       case BOOLEAN:
         {
@@ -529,7 +518,7 @@ public class TestVectorSerDeRow extends TestCase {
         {
           HiveDecimalWritable expectedWritable = (HiveDecimalWritable) object;
           HiveDecimal value = expectedWritable.getHiveDecimal();
-          serializeWrite.writeHiveDecimal(value);
+          serializeWrite.writeHiveDecimal(value, ((DecimalTypeInfo)primitiveTypeInfos[i]).scale());
         }
         break;
       default:

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/acid_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_join.q.out b/ql/src/test/results/clientpositive/acid_join.q.out
index a1edb89..fcc7d75 100644
--- a/ql/src/test/results/clientpositive/acid_join.q.out
+++ b/ql/src/test/results/clientpositive/acid_join.q.out
@@ -65,7 +65,7 @@ POSTHOOK: query: select * from acidjoin3 order by name
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acidjoin3
 #### A masked pattern was here ####
-aaa	35	3
+aaa	35	3.00
 bbb	32	3.01
 ccc	32	3.02
 ddd	35	3.03

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
index 50520aa..9e397c1 100644
--- a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out
@@ -213,16 +213,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77	abc	123
-Beck	80	abc	123
-Cluck	6	abc	123
-Mary	33	abc	123
-Mary	4	abc	123
-Snow	56	abc	123
-Tom	-12	abc	123
-Tom	19	abc	123
-Tom	235	abc	123
+Beck	0.0000	abc	123
+Beck	77.0000	abc	123
+Beck	80.0000	abc	123
+Cluck	6.0000	abc	123
+Mary	33.0000	abc	123
+Mary	4.0000	abc	123
+Snow	56.0000	abc	123
+Tom	-12.0000	abc	123
+Tom	19.0000	abc	123
+Tom	235.0000	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -233,16 +233,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77	__HIVE_DEFAULT_PARTITION__	123
-Beck	80	__HIVE_DEFAULT_PARTITION__	123
-Cluck	6	__HIVE_DEFAULT_PARTITION__	123
-Mary	33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4	__HIVE_DEFAULT_PARTITION__	123
-Snow	56	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	235	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	80.0000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	6.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.0000	__HIVE_DEFAULT_PARTITION__	123
+Snow	56.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	235.0000	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- now change the column type of the existing partition
 alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4)
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -280,16 +280,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77.341	abc	123
-Beck	79.9	abc	123
-Cluck	5.96	abc	123
-Mary	33.33	abc	123
-Mary	4.329	abc	123
-Snow	55.71	abc	123
-Tom	-12.25	abc	123
-Tom	19	abc	123
-Tom	234.79	abc	123
+Beck	0.0000	abc	123
+Beck	77.3410	abc	123
+Beck	79.9000	abc	123
+Cluck	5.9600	abc	123
+Mary	33.3300	abc	123
+Mary	4.3290	abc	123
+Snow	55.7100	abc	123
+Tom	-12.2500	abc	123
+Tom	19.0000	abc	123
+Tom	234.7900	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -300,16 +300,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77	__HIVE_DEFAULT_PARTITION__	123
-Beck	80	__HIVE_DEFAULT_PARTITION__	123
-Cluck	6	__HIVE_DEFAULT_PARTITION__	123
-Mary	33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4	__HIVE_DEFAULT_PARTITION__	123
-Snow	56	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	235	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	80.0000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	6.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.0000	__HIVE_DEFAULT_PARTITION__	123
+Snow	56.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	235.0000	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- change column for default partition value
 alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4)
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -347,16 +347,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77.341	abc	123
-Beck	79.9	abc	123
-Cluck	5.96	abc	123
-Mary	33.33	abc	123
-Mary	4.329	abc	123
-Snow	55.71	abc	123
-Tom	-12.25	abc	123
-Tom	19	abc	123
-Tom	234.79	abc	123
+Beck	0.0000	abc	123
+Beck	77.3410	abc	123
+Beck	79.9000	abc	123
+Cluck	5.9600	abc	123
+Mary	33.3300	abc	123
+Mary	4.3290	abc	123
+Snow	55.7100	abc	123
+Tom	-12.2500	abc	123
+Tom	19.0000	abc	123
+Tom	234.7900	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -367,16 +367,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- Try out replace columns
 alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
@@ -449,16 +449,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: alter table alter_partition_change_col1 replace columns (c1 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@alter_partition_change_col1
@@ -593,16 +593,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') add columns (c2 decimal(14,4))
 PREHOOK: type: ALTERTABLE_ADDCOLS
 PREHOOK: Input: default@alter_partition_change_col1
@@ -638,16 +638,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77.341	abc	123
-Beck	79.9	abc	123
-Cluck	5.96	abc	123
-Mary	33.33	abc	123
-Mary	4.329	abc	123
-Snow	55.71	abc	123
-Tom	-12.25	abc	123
-Tom	19	abc	123
-Tom	234.79	abc	123
+Beck	0.0000	abc	123
+Beck	77.3410	abc	123
+Beck	79.9000	abc	123
+Cluck	5.9600	abc	123
+Mary	33.3300	abc	123
+Mary	4.3290	abc	123
+Snow	55.7100	abc	123
+Tom	-12.2500	abc	123
+Tom	19.0000	abc	123
+Tom	234.7900	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -658,16 +658,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- Try changing column for all partitions at once
 alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0)
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -724,16 +724,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123
 #### A masked pattern was here ####
-Beck	0	abc	123
-Beck	77	abc	123
-Beck	80	abc	123
-Cluck	6	abc	123
-Mary	33	abc	123
-Mary	4	abc	123
-Snow	56	abc	123
-Tom	-12	abc	123
-Tom	19	abc	123
-Tom	235	abc	123
+Beck	0.0000	abc	123
+Beck	77.0000	abc	123
+Beck	80.0000	abc	123
+Cluck	6.0000	abc	123
+Mary	33.0000	abc	123
+Mary	4.0000	abc	123
+Snow	56.0000	abc	123
+Tom	-12.0000	abc	123
+Tom	19.0000	abc	123
+Tom	235.0000	abc	123
 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_partition_change_col1
@@ -744,13 +744,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_partition_change_col1
 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77	__HIVE_DEFAULT_PARTITION__	123
-Beck	80	__HIVE_DEFAULT_PARTITION__	123
-Cluck	6	__HIVE_DEFAULT_PARTITION__	123
-Mary	33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4	__HIVE_DEFAULT_PARTITION__	123
-Snow	56	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	235	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	80.0000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	6.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.0000	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.0000	__HIVE_DEFAULT_PARTITION__	123
+Snow	56.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	235.0000	__HIVE_DEFAULT_PARTITION__	123

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/alter_table_cascade.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_cascade.q.out b/ql/src/test/results/clientpositive/alter_table_cascade.q.out
index 3bf1a43..1d8204c 100644
--- a/ql/src/test/results/clientpositive/alter_table_cascade.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_cascade.q.out
@@ -833,16 +833,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_table_cascade
 POSTHOOK: Input: default@alter_table_cascade@p1=xyz/p2=123
 #### A masked pattern was here ####
-Beck	0	xyz	123
-Beck	77.341	xyz	123
-Beck	79.9	xyz	123
-Cluck	5.96	xyz	123
-Mary	33.33	xyz	123
-Mary	4.329	xyz	123
-Snow	55.71	xyz	123
-Tom	-12.25	xyz	123
-Tom	19	xyz	123
-Tom	234.79	xyz	123
+Beck	0.0000	xyz	123
+Beck	77.3410	xyz	123
+Beck	79.9000	xyz	123
+Cluck	5.9600	xyz	123
+Mary	33.3300	xyz	123
+Mary	4.3290	xyz	123
+Snow	55.7100	xyz	123
+Tom	-12.2500	xyz	123
+Tom	19.0000	xyz	123
+Tom	234.7900	xyz	123
 PREHOOK: query: select * from alter_table_cascade where p1='abc'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_table_cascade
@@ -873,16 +873,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_table_cascade
 POSTHOOK: Input: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123
 #### A masked pattern was here ####
-Beck	0	__HIVE_DEFAULT_PARTITION__	123
-Beck	77.341	__HIVE_DEFAULT_PARTITION__	123
-Beck	79.9	__HIVE_DEFAULT_PARTITION__	123
-Cluck	5.96	__HIVE_DEFAULT_PARTITION__	123
-Mary	33.33	__HIVE_DEFAULT_PARTITION__	123
-Mary	4.329	__HIVE_DEFAULT_PARTITION__	123
-Snow	55.71	__HIVE_DEFAULT_PARTITION__	123
-Tom	-12.25	__HIVE_DEFAULT_PARTITION__	123
-Tom	19	__HIVE_DEFAULT_PARTITION__	123
-Tom	234.79	__HIVE_DEFAULT_PARTITION__	123
+Beck	0.0000	__HIVE_DEFAULT_PARTITION__	123
+Beck	77.3410	__HIVE_DEFAULT_PARTITION__	123
+Beck	79.9000	__HIVE_DEFAULT_PARTITION__	123
+Cluck	5.9600	__HIVE_DEFAULT_PARTITION__	123
+Mary	33.3300	__HIVE_DEFAULT_PARTITION__	123
+Mary	4.3290	__HIVE_DEFAULT_PARTITION__	123
+Snow	55.7100	__HIVE_DEFAULT_PARTITION__	123
+Tom	-12.2500	__HIVE_DEFAULT_PARTITION__	123
+Tom	19.0000	__HIVE_DEFAULT_PARTITION__	123
+Tom	234.7900	__HIVE_DEFAULT_PARTITION__	123
 PREHOOK: query: -- 
 
 drop table if exists alter_table_restrict

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out b/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
index 5e5a2f6..021c4ee 100644
--- a/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out
@@ -44,7 +44,7 @@ POSTHOOK: query: select cast(key as int) / cast(key as int) from src limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-1
+1.00000000000
 PREHOOK: query: -- With ansi sql arithmetic disabled, int / int => double
 explain select cast(key as int) / cast(key as int) from src limit 1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/avro_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal.q.out b/ql/src/test/results/clientpositive/avro_decimal.q.out
index 7ba376e..64e65ca 100644
--- a/ql/src/test/results/clientpositive/avro_decimal.q.out
+++ b/ql/src/test/results/clientpositive/avro_decimal.q.out
@@ -106,9 +106,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -175,10 +175,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
-19
+19.0
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/avro_decimal_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/avro_decimal_native.q.out
index 318be3d..cebc342 100644
--- a/ql/src/test/results/clientpositive/avro_decimal_native.q.out
+++ b/ql/src/test/results/clientpositive/avro_decimal_native.q.out
@@ -92,9 +92,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1
@@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
-19
+19.0
 3.2
 79.9
 PREHOOK: query: DROP TABLE dec

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/cast_qualified_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cast_qualified_types.q.out b/ql/src/test/results/clientpositive/cast_qualified_types.q.out
index 1924c5d..099a199 100644
--- a/ql/src/test/results/clientpositive/cast_qualified_types.q.out
+++ b/ql/src/test/results/clientpositive/cast_qualified_types.q.out
@@ -18,4 +18,4 @@ limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-0	0         	0
+0.00	0         	0

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_1_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_1_1.q.out b/ql/src/test/results/clientpositive/decimal_1_1.q.out
index b2704c6..46fbeb7 100644
--- a/ql/src/test/results/clientpositive/decimal_1_1.q.out
+++ b/ql/src/test/results/clientpositive/decimal_1_1.q.out
@@ -26,9 +26,9 @@ POSTHOOK: query: select * from decimal_1_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_1_1
 #### A masked pattern was here ####
-0
-0
-0
+0.0
+0.0
+0.0
 0.1
 0.2
 0.9
@@ -37,13 +37,13 @@ NULL
 0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.9
@@ -52,10 +52,10 @@ NULL
 -0.3
 NULL
 NULL
-0
-0
+0.0
+0.0
 NULL
-0
+0.0
 PREHOOK: query: select d from decimal_1_1 order by d desc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_1_1
@@ -69,18 +69,18 @@ POSTHOOK: Input: default@decimal_1_1
 0.3
 0.2
 0.1
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
+0.0
 -0.1
 -0.2
 -0.3

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_3.q.out b/ql/src/test/results/clientpositive/decimal_3.q.out
index 8e9a30a..3ded9a7 100644
--- a/ql/src/test/results/clientpositive/decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/decimal_3.q.out
@@ -33,43 +33,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -78,43 +78,43 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-1234567890.12345678	1234567890
-200	200
-125.2	125
-124	124
-100	100
-20	20
-10	10
-3.14	4
-3.14	3
-3.14	3
-3.14	3
-2	2
-2	2
-1.122	1
-1.12	1
-1	1
-1	1
-1	1
-0.333	0
-0.33	0
-0.3	0
-0.2	0
-0.1	0
-0.02	0
-0.01	0
-0	0
-0	0
-0	0
--0.3	0
--0.33	0
--0.333	0
--1.12	-1
--1.12	-1
--1.122	-11
--1255.49	-1255
--4400	4400
--1234567890.123456789	-1234567890
+1234567890.123456780000000000	1234567890
+200.000000000000000000	200
+125.200000000000000000	125
+124.000000000000000000	124
+100.000000000000000000	100
+20.000000000000000000	20
+10.000000000000000000	10
+3.140000000000000000	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+2.000000000000000000	2
+2.000000000000000000	2
+1.122000000000000000	1
+1.120000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+0.333000000000000000	0
+0.330000000000000000	0
+0.300000000000000000	0
+0.200000000000000000	0
+0.100000000000000000	0
+0.020000000000000000	0
+0.010000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+-0.300000000000000000	0
+-0.330000000000000000	0
+-0.333000000000000000	0
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-1.122000000000000000	-11
+-1255.490000000000000000	-1255
+-4400.000000000000000000	4400
+-1234567890.123456789000000000	-1234567890
 NULL	0
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -125,43 +125,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-1
+-1.120000000000000000	-1
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	1
+1.000000000000000000	1
+1.000000000000000000	1
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	2
+2.000000000000000000	2
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -171,34 +171,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL
--1234567890.123456789
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
-1234567890.12345678
+-1234567890.123456789000000000
+-4400.000000000000000000
+-1255.490000000000000000
+-1.122000000000000000
+-1.120000000000000000
+-0.333000000000000000
+-0.330000000000000000
+-0.300000000000000000
+0.000000000000000000
+0.010000000000000000
+0.020000000000000000
+0.100000000000000000
+0.200000000000000000
+0.300000000000000000
+0.330000000000000000
+0.333000000000000000
+1.000000000000000000
+1.120000000000000000
+1.122000000000000000
+2.000000000000000000
+3.140000000000000000
+10.000000000000000000
+20.000000000000000000
+100.000000000000000000
+124.000000000000000000
+125.200000000000000000
+200.000000000000000000
+1234567890.123456780000000000
 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -208,34 +208,34 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-2
--0.333	0
--0.33	0
--0.3	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
-1	3
-1.12	1
-1.122	1
-2	4
-3.14	13
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400
+-1255.490000000000000000	-1255
+-1.122000000000000000	-11
+-1.120000000000000000	-2
+-0.333000000000000000	0
+-0.330000000000000000	0
+-0.300000000000000000	0
+0.000000000000000000	0
+0.010000000000000000	0
+0.020000000000000000	0
+0.100000000000000000	0
+0.200000000000000000	0
+0.300000000000000000	0
+0.330000000000000000	0
+0.333000000000000000	0
+1.000000000000000000	3
+1.120000000000000000	1
+1.122000000000000000	1
+2.000000000000000000	4
+3.140000000000000000	13
+10.000000000000000000	10
+20.000000000000000000	20
+100.000000000000000000	100
+124.000000000000000000	124
+125.200000000000000000	125
+200.000000000000000000	200
+1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -244,23 +244,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789
--1255	-1255.49
--11	-1.122
--1	-2.24
-0	0.33
-1	5.242
-2	4
-3	9.42
-4	3.14
-10	10
-20	20
-100	100
-124	124
-125	125.2
-200	200
-4400	-4400
-1234567890	1234567890.12345678
+-1234567890	-1234567890.123456789000000000
+-1255	-1255.490000000000000000
+-11	-1.122000000000000000
+-1	-2.240000000000000000
+0	0.330000000000000000
+1	5.242000000000000000
+2	4.000000000000000000
+3	9.420000000000000000
+4	3.140000000000000000
+10	10.000000000000000000
+20	20.000000000000000000
+100	100.000000000000000000
+124	124.000000000000000000
+125	125.200000000000000000
+200	200.000000000000000000
+4400	-4400.000000000000000000
+1234567890	1234567890.123456780000000000
 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -269,71 +269,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -342,10 +342,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3
@@ -354,10 +354,10 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
-3.14	3
-3.14	3
-3.14	3
-3.14	4
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	3
+3.140000000000000000	4
 PREHOOK: query: DROP TABLE DECIMAL_3
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_4.q.out b/ql/src/test/results/clientpositive/decimal_4.q.out
index 50662af..8eb1de4 100644
--- a/ql/src/test/results/clientpositive/decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/decimal_4.q.out
@@ -57,43 +57,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_1
 #### A masked pattern was here ####
 NULL	0
--1234567890.123456789	-1234567890
--4400	4400
--1255.49	-1255
--1.122	-11
--1.12	-1
--1.12	-1
--0.333	0
--0.33	0
--0.3	0
-0	0
-0	0
-0	0
-0.01	0
-0.02	0
-0.1	0
-0.2	0
-0.3	0
-0.33	0
-0.333	0
+-1234567890.1234567890000000000000000	-1234567890
+-4400.0000000000000000000000000	4400
+-1255.4900000000000000000000000	-1255
+-1.1220000000000000000000000	-11
+-1.1200000000000000000000000	-1
+-1.1200000000000000000000000	-1
+-0.3330000000000000000000000	0
+-0.3300000000000000000000000	0
+-0.3000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0000000000000000000000000	0
+0.0100000000000000000000000	0
+0.0200000000000000000000000	0
+0.1000000000000000000000000	0
+0.2000000000000000000000000	0
+0.3000000000000000000000000	0
+0.3300000000000000000000000	0
+0.3330000000000000000000000	0
 0.9999999999999999999999999	1
-1	1
-1	1
-1.12	1
-1.122	1
-2	2
-2	2
-3.14	3
-3.14	3
-3.14	3
-3.14	4
-10	10
-20	20
-100	100
-124	124
-125.2	125
-200	200
-1234567890.12345678	1234567890
+1.0000000000000000000000000	1
+1.0000000000000000000000000	1
+1.1200000000000000000000000	1
+1.1220000000000000000000000	1
+2.0000000000000000000000000	2
+2.0000000000000000000000000	2
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	3
+3.1400000000000000000000000	4
+10.0000000000000000000000000	10
+20.0000000000000000000000000	20
+100.0000000000000000000000000	100
+124.0000000000000000000000000	124
+125.2000000000000000000000000	125
+200.0000000000000000000000000	200
+1234567890.1234567800000000000000000	1234567890
 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_4_2
@@ -103,43 +103,43 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_4_2
 #### A masked pattern was here ####
 NULL	NULL
--1234567890.123456789	-3703703670.370370367
--4400	-13200
--1255.49	-3766.47
--1.122	-3.366
--1.12	-3.36
--1.12	-3.36
--0.333	-0.999
--0.33	-0.99
--0.3	-0.9
-0	0
-0	0
-0	0
-0.01	0.03
-0.02	0.06
-0.1	0.3
-0.2	0.6
-0.3	0.9
-0.33	0.99
-0.333	0.999
+-1234567890.1234567890000000000000000	-3703703670.3703703670000000000000000
+-4400.0000000000000000000000000	-13200.0000000000000000000000000
+-1255.4900000000000000000000000	-3766.4700000000000000000000000
+-1.1220000000000000000000000	-3.3660000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-1.1200000000000000000000000	-3.3600000000000000000000000
+-0.3330000000000000000000000	-0.9990000000000000000000000
+-0.3300000000000000000000000	-0.9900000000000000000000000
+-0.3000000000000000000000000	-0.9000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0000000000000000000000000	0.0000000000000000000000000
+0.0100000000000000000000000	0.0300000000000000000000000
+0.0200000000000000000000000	0.0600000000000000000000000
+0.1000000000000000000000000	0.3000000000000000000000000
+0.2000000000000000000000000	0.6000000000000000000000000
+0.3000000000000000000000000	0.9000000000000000000000000
+0.3300000000000000000000000	0.9900000000000000000000000
+0.3330000000000000000000000	0.9990000000000000000000000
 0.9999999999999999999999999	2.9999999999999999999999997
-1	3
-1	3
-1.12	3.36
-1.122	3.366
-2	6
-2	6
-3.14	9.42
-3.14	9.42
-3.14	9.42
-3.14	9.42
-10	30
-20	60
-100	300
-124	372
-125.2	375.6
-200	600
-1234567890.12345678	3703703670.37037034
+1.0000000000000000000000000	3.0000000000000000000000000
+1.0000000000000000000000000	3.0000000000000000000000000
+1.1200000000000000000000000	3.3600000000000000000000000
+1.1220000000000000000000000	3.3660000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+2.0000000000000000000000000	6.0000000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+3.1400000000000000000000000	9.4200000000000000000000000
+10.0000000000000000000000000	30.0000000000000000000000000
+20.0000000000000000000000000	60.0000000000000000000000000
+100.0000000000000000000000000	300.0000000000000000000000000
+124.0000000000000000000000000	372.0000000000000000000000000
+125.2000000000000000000000000	375.6000000000000000000000000
+200.0000000000000000000000000	600.0000000000000000000000000
+1234567890.1234567800000000000000000	3703703670.3703703400000000000000000
 PREHOOK: query: DROP TABLE DECIMAL_4_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_5.q.out b/ql/src/test/results/clientpositive/decimal_5.q.out
index 0f24b8a..0c46538 100644
--- a/ql/src/test/results/clientpositive/decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/decimal_5.q.out
@@ -35,41 +35,41 @@ POSTHOOK: Input: default@decimal_5
 NULL
 NULL
 NULL
--4400
--1255.49
--1.122
--1.12
--1.12
--0.333
--0.33
--0.3
-0
-0
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1
-1
-1.12
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.00000
+1.00000
+1.12000
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -79,32 +79,32 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
--4400
--1255.49
--1.122
--1.12
--0.333
--0.33
--0.3
-0
-0.01
-0.02
-0.1
-0.2
-0.3
-0.33
-0.333
-1
-1.12
-1.122
-2
-3.14
-10
-20
-100
-124
-125.2
-200
+-4400.00000
+-1255.49000
+-1.12200
+-1.12000
+-0.33300
+-0.33000
+-0.30000
+0.00000
+0.01000
+0.02000
+0.10000
+0.20000
+0.30000
+0.33000
+0.33300
+1.00000
+1.12000
+1.12200
+2.00000
+3.14000
+10.00000
+20.00000
+100.00000
+124.00000
+125.20000
+200.00000
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -161,40 +161,40 @@ POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
 NULL
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
+0.000
+0.000
+100.000
+10.000
+1.000
+0.100
+0.010
+200.000
+20.000
+2.000
+0.000
+0.200
+0.020
+0.300
+0.330
 0.333
--0.3
--0.33
+-0.300
+-0.330
 -0.333
-1
-2
-3.14
--1.12
--1.12
+1.000
+2.000
+3.140
+-1.120
+-1.120
 -1.122
-1.12
+1.120
 1.122
-124
-125.2
+124.000
+125.200
 NULL
-3.14
-3.14
-3.14
-1
+3.140
+3.140
+3.140
+1.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out
index 0344fa9..e1ce600 100644
--- a/ql/src/test/results/clientpositive/decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/decimal_6.q.out
@@ -78,54 +78,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.1235
--4400
--4400
--1255.49
--1255.49
--1.122
--1.122
--1.12
--1.12
--0.333
--0.333
--0.3
--0.3
-0
-0
-0
-0
-0.333
-0.333
-1
-1
-1
-1
-1.12
-1.12
-1.122
-1.122
-2
-2
-3.14
-3.14
-3.14
-3.14
-3.14
-3.14
-10
-10
-10.7343
+-1234567890.12350
+-4400.00000
+-4400.00000
+-1255.49000
+-1255.49000
+-1.12200
+-1.12200
+-1.12000
+-1.12000
+-0.33300
+-0.33300
+-0.30000
+-0.30000
+0.00000
+0.00000
+0.00000
+0.00000
+0.33300
+0.33300
+1.00000
+1.00000
+1.00000
+1.00000
+1.12000
+1.12000
+1.12200
+1.12200
+2.00000
+2.00000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+3.14000
+10.00000
+10.00000
+10.73430
 10.73433
-124
-124
-125.2
-125.2
+124.00000
+124.00000
+125.20000
+125.20000
 23232.23435
-23232.2344
-2389432.2375
-2389432.2375
-1234567890.1235
+23232.23440
+2389432.23750
+2389432.23750
+1234567890.12350
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@decimal_6_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_join2.q.out b/ql/src/test/results/clientpositive/decimal_join2.q.out
index 604f99b..a3ca231 100644
--- a/ql/src/test/results/clientpositive/decimal_join2.q.out
+++ b/ql/src/test/results/clientpositive/decimal_join2.q.out
@@ -132,71 +132,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: EXPLAIN
 SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value
 PREHOOK: type: QUERY
@@ -282,71 +282,71 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_3
 #### A masked pattern was here ####
--1234567890.123456789	-1234567890	-1234567890.123456789	-1234567890
--4400	4400	-4400	4400
--1255.49	-1255	-1255.49	-1255
--1.122	-11	-1.122	-11
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--1.12	-1	-1.12	-1
--0.333	0	-0.333	0
--0.33	0	-0.33	0
--0.3	0	-0.3	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0	0	0	0
-0.01	0	0.01	0
-0.02	0	0.02	0
-0.1	0	0.1	0
-0.2	0	0.2	0
-0.3	0	0.3	0
-0.33	0	0.33	0
-0.333	0	0.333	0
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1	1	1	1
-1.12	1	1.12	1
-1.122	1	1.122	1
-2	2	2	2
-2	2	2	2
-2	2	2	2
-2	2	2	2
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	3
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	3	3.14	4
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	3
-3.14	4	3.14	4
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125.2	125	125.2	125
-200	200	200	200
-1234567890.12345678	1234567890	1234567890.12345678	1234567890
+-1234567890.123456789000000000	-1234567890	-1234567890.123456789000000000	-1234567890
+-4400.000000000000000000	4400	-4400.000000000000000000	4400
+-1255.490000000000000000	-1255	-1255.490000000000000000	-1255
+-1.122000000000000000	-11	-1.122000000000000000	-11
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-1.120000000000000000	-1	-1.120000000000000000	-1
+-0.333000000000000000	0	-0.333000000000000000	0
+-0.330000000000000000	0	-0.330000000000000000	0
+-0.300000000000000000	0	-0.300000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.000000000000000000	0	0.000000000000000000	0
+0.010000000000000000	0	0.010000000000000000	0
+0.020000000000000000	0	0.020000000000000000	0
+0.100000000000000000	0	0.100000000000000000	0
+0.200000000000000000	0	0.200000000000000000	0
+0.300000000000000000	0	0.300000000000000000	0
+0.330000000000000000	0	0.330000000000000000	0
+0.333000000000000000	0	0.333000000000000000	0
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.000000000000000000	1	1.000000000000000000	1
+1.120000000000000000	1	1.120000000000000000	1
+1.122000000000000000	1	1.122000000000000000	1
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+2.000000000000000000	2	2.000000000000000000	2
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	3
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	3	3.140000000000000000	4
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	3
+3.140000000000000000	4	3.140000000000000000	4
+10.000000000000000000	10	10.000000000000000000	10
+20.000000000000000000	20	20.000000000000000000	20
+100.000000000000000000	100	100.000000000000000000	100
+124.000000000000000000	124	124.000000000000000000	124
+125.200000000000000000	125	125.200000000000000000	125
+200.000000000000000000	200	200.000000000000000000	200
+1234567890.123456780000000000	1234567890	1234567890.123456780000000000	1234567890
 PREHOOK: query: DROP TABLE DECIMAL_3_txt
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_3_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out
index 69a6045..5542b40 100644
--- a/ql/src/test/results/clientpositive/decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/decimal_precision.q.out
@@ -76,13 +76,13 @@ NULL
 NULL
 NULL
 NULL
-0
-0
-0
-0
-0
-0.123456789
-0.123456789
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1234567890
+0.1234567890
 1.2345678901
 1.2345678901
 1.2345678901
@@ -99,14 +99,14 @@ NULL
 12345.6789012346
 123456.7890123456
 123456.7890123457
-1234567.890123456
+1234567.8901234560
 1234567.8901234568
-12345678.90123456
+12345678.9012345600
 12345678.9012345679
-123456789.0123456
+123456789.0123456000
 123456789.0123456789
-1234567890.123456
-1234567890.123456789
+1234567890.1234560000
+1234567890.1234567890
 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -159,13 +159,13 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0	1	-1
-0.123456789	1.123456789	-0.876543211
-0.123456789	1.123456789	-0.876543211
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.0000000000	1.0000000000	-1.0000000000
+0.1234567890	1.1234567890	-0.8765432110
+0.1234567890	1.1234567890	-0.8765432110
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
 1.2345678901	2.2345678901	0.2345678901
@@ -182,14 +182,14 @@ NULL	NULL	NULL
 12345.6789012346	12346.6789012346	12344.6789012346
 123456.7890123456	123457.7890123456	123455.7890123456
 123456.7890123457	123457.7890123457	123455.7890123457
-1234567.890123456	1234568.890123456	1234566.890123456
+1234567.8901234560	1234568.8901234560	1234566.8901234560
 1234567.8901234568	1234568.8901234568	1234566.8901234568
-12345678.90123456	12345679.90123456	12345677.90123456
+12345678.9012345600	12345679.9012345600	12345677.9012345600
 12345678.9012345679	12345679.9012345679	12345677.9012345679
-123456789.0123456	123456790.0123456	123456788.0123456
+123456789.0123456000	123456790.0123456000	123456788.0123456000
 123456789.0123456789	123456790.0123456789	123456788.0123456789
-1234567890.123456	1234567891.123456	1234567889.123456
-1234567890.123456789	1234567891.123456789	1234567889.123456789
+1234567890.1234560000	1234567891.1234560000	1234567889.1234560000
+1234567890.1234567890	1234567891.1234567890	1234567889.1234567890
 PREHOOK: query: SELECT dec, dec * 2, dec / 3  FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -242,37 +242,37 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0	0	0
-0.123456789	0.246913578	0.041152263
-0.123456789	0.246913578	0.041152263
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.0000000000	0.0000000000	0.000000000000
+0.1234567890	0.2469135780	0.041152263000
+0.1234567890	0.2469135780	0.041152263000
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
 1.2345678901	2.4691357802	0.411522630033
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-12.3456789012	24.6913578024	4.1152263004
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-123.4567890123	246.9135780246	41.1522630041
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
-1234.5678901235	2469.135780247	411.522630041167
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+12.3456789012	24.6913578024	4.115226300400
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+123.4567890123	246.9135780246	41.152263004100
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
+1234.5678901235	2469.1357802470	411.522630041167
 12345.6789012346	24691.3578024692	4115.226300411533
 12345.6789012346	24691.3578024692	4115.226300411533
-123456.7890123456	246913.5780246912	41152.2630041152
+123456.7890123456	246913.5780246912	41152.263004115200
 123456.7890123457	246913.5780246914	41152.263004115233
-1234567.890123456	2469135.780246912	411522.630041152
+1234567.8901234560	2469135.7802469120	411522.630041152000
 1234567.8901234568	2469135.7802469136	411522.630041152267
-12345678.90123456	24691357.80246912	4115226.30041152
+12345678.9012345600	24691357.8024691200	4115226.300411520000
 12345678.9012345679	24691357.8024691358	4115226.300411522633
-123456789.0123456	246913578.0246912	41152263.0041152
-123456789.0123456789	246913578.0246913578	41152263.0041152263
-1234567890.123456	2469135780.246912	411522630.041152
-1234567890.123456789	2469135780.246913578	411522630.041152263
+123456789.0123456000	246913578.0246912000	41152263.004115200000
+123456789.0123456789	246913578.0246913578	41152263.004115226300
+1234567890.1234560000	2469135780.2469120000	411522630.041152000000
+1234567890.1234567890	2469135780.2469135780	411522630.041152263000
 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -325,13 +325,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.013717421
-0.123456789	0.013717421
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.0000000000	0.000000000000
+0.1234567890	0.013717421000
+0.1234567890	0.013717421000
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
 1.2345678901	0.137174210011
@@ -348,14 +348,14 @@ NULL	NULL
 12345.6789012346	1371.742100137178
 123456.7890123456	13717.421001371733
 123456.7890123457	13717.421001371744
-1234567.890123456	137174.210013717333
+1234567.8901234560	137174.210013717333
 1234567.8901234568	137174.210013717422
-12345678.90123456	1371742.100137173333
+12345678.9012345600	1371742.100137173333
 12345678.9012345679	1371742.100137174211
-123456789.0123456	13717421.001371733333
-123456789.0123456789	13717421.0013717421
-1234567890.123456	137174210.013717333333
-1234567890.123456789	137174210.013717421
+123456789.0123456000	13717421.001371733333
+123456789.0123456789	13717421.001371742100
+1234567890.1234560000	137174210.013717333333
+1234567890.1234567890	137174210.013717421000
 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -408,13 +408,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.0045724736667
-0.123456789	0.0045724736667
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.0000000000	0.0000000000000
+0.1234567890	0.0045724736667
+0.1234567890	0.0045724736667
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
 1.2345678901	0.0457247366704
@@ -431,14 +431,14 @@ NULL	NULL
 12345.6789012346	457.2473667123926
 123456.7890123456	4572.4736671239111
 123456.7890123457	4572.4736671239148
-1234567.890123456	45724.7366712391111
+1234567.8901234560	45724.7366712391111
 1234567.8901234568	45724.7366712391407
-12345678.90123456	457247.3667123911111
+12345678.9012345600	457247.3667123911111
 12345678.9012345679	457247.3667123914037
-123456789.0123456	4572473.6671239111111
+123456789.0123456000	4572473.6671239111111
 123456789.0123456789	4572473.6671239140333
-1234567890.123456	45724736.6712391111111
-1234567890.123456789	45724736.6712391403333
+1234567890.1234560000	45724736.6712391111111
+1234567890.1234567890	45724736.6712391403333
 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision
@@ -491,13 +491,13 @@ NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-0	0
-0	0
-0	0
-0	0
-0	0
-0.123456789	0.015241578750190521
-0.123456789	0.015241578750190521
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.0000000000	0.00000000000000000000
+0.1234567890	0.01524157875019052100
+0.1234567890	0.01524157875019052100
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
 1.2345678901	1.52415787526596567801
@@ -514,14 +514,14 @@ NULL	NULL
 12345.6789012346	152415787.53238916034140423716
 123456.7890123456	15241578753.23881726870921383936
 123456.7890123457	15241578753.23884196006701630849
-1234567.890123456	1524157875323.881726870921383936
+1234567.8901234560	1524157875323.88172687092138393600
 1234567.8901234568	1524157875323.88370217954558146624
-12345678.90123456	152415787532388.1726870921383936
+12345678.9012345600	152415787532388.17268709213839360000
 12345678.9012345679	152415787532388.36774881877789971041
-123456789.0123456	15241578753238817.26870921383936
+123456789.0123456000	15241578753238817.26870921383936000000
 123456789.0123456789	15241578753238836.75019051998750190521
-1234567890.123456	NULL
-1234567890.123456789	NULL
+1234567890.1234560000	NULL
+1234567890.1234567890	NULL
 PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
@@ -613,7 +613,7 @@ POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_precision
 #### A masked pattern was here ####
-12345678901234567890.12345678
+12345678901234567890.123456780000000000
 PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_precision

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_trailing.q.out b/ql/src/test/results/clientpositive/decimal_trailing.q.out
index 6cfe282..1b70737 100644
--- a/ql/src/test/results/clientpositive/decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/decimal_trailing.q.out
@@ -40,16 +40,16 @@ POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_trailing
 #### A masked pattern was here ####
-0	0	0
-1	0	0
+0	0.0000	0.00000000
+1	0.0000	0.00000000
 2	NULL	NULL
-3	1	1
-4	10	10
-5	100	100
-6	1000	1000
-7	10000	10000
-8	100000	100000
-9	NULL	1000000
+3	1.0000	1.00000000
+4	10.0000	10.00000000
+5	100.0000	100.00000000
+6	1000.0000	1000.00000000
+7	10000.0000	10000.00000000
+8	100000.0000	100000.00000000
+9	NULL	1000000.00000000
 10	NULL	NULL
 11	NULL	NULL
 12	NULL	NULL
@@ -58,18 +58,18 @@ POSTHOOK: Input: default@decimal_trailing
 15	NULL	NULL
 16	NULL	NULL
 17	NULL	NULL
-18	1	1
-19	10	10
-20	100	100
-21	1000	1000
-22	100000	10000
-23	0	0
-24	0	0
-25	0	0
-26	0	0
-27	0	0
-28	12313.2	134134.312525
-29	99999.999	134134.31242553
+18	1.0000	1.00000000
+19	10.0000	10.00000000
+20	100.0000	100.00000000
+21	1000.0000	1000.00000000
+22	100000.0000	10000.00000000
+23	0.0000	0.00000000
+24	0.0000	0.00000000
+25	0.0000	0.00000000
+26	0.0000	0.00000000
+27	0.0000	0.00000000
+28	12313.2000	134134.31252500
+29	99999.9990	134134.31242553
 PREHOOK: query: DROP TABLE DECIMAL_TRAILING
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_trailing
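
Editorial note: the golden-file churn above is mechanical — decimal values that used to print with trailing zeros trimmed (3.14, -4400) now print padded to the column's declared scale (3.140000000000000000 for decimal(38,18), -4400.00000 for a scale-5 type). A minimal plain-Java illustration of the same padding behavior, using java.math.BigDecimal as an analogy only (assumption: this is not Hive's actual HiveDecimal formatting code path):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class ScalePaddingDemo {
      public static void main(String[] args) {
        BigDecimal pi = new BigDecimal("3.14");

        // Old-style output: trailing zeros trimmed.
        System.out.println(pi.stripTrailingZeros().toPlainString());        // 3.14

        // New-style output: padded to the declared scale of the column type.
        System.out.println(pi.setScale(18, RoundingMode.UNNECESSARY)
            .toPlainString());                                              // 3.140000000000000000
        System.out.println(pi.setScale(5, RoundingMode.UNNECESSARY)
            .toPlainString());                                              // 3.14000
      }
    }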


[07/55] [abbrv] hive git commit: HIVE-11634 : Support partition pruning for IN(STRUCT(partcol, nonpartcol..)...) (Hari Subramaniyan, reviewed by Laljo John Pullokkaran)

Posted by jx...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/pointlookup.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup.q.out b/ql/src/test/results/clientpositive/pointlookup.q.out
index 7e19be4..a99b388 100644
--- a/ql/src/test/results/clientpositive/pointlookup.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup.q.out
@@ -176,15 +176,15 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((value) IN ('1', '3', '5', '6', '8') and (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3'))) (type: boolean)
-              Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+              predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
index 7e28c77..792ccaf 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
@@ -1236,21 +1236,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
@@ -3944,21 +3929,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -5063,21 +5033,6 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                           Target column: ds
                           Target Vertex: Map 1
-                    Select Operator
-                      expressions: UDFToDouble(hr) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart_orc
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index 1103e80..da2033b 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -1251,21 +1251,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
@@ -3995,21 +3980,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
@@ -5131,21 +5101,6 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                           Target column: ds
                           Target Vertex: Map 1
-                    Select Operator
-                      expressions: UDFToDouble(hr) (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                        Dynamic Partitioning Event Operator
-                          Target Input: srcpart_orc
-                          Partition key expr: UDFToDouble(hr)
-                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                          Target column: hr
-                          Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator


[25/55] [abbrv] hive git commit: HIVE-12202 NPE thrown when reading legacy ACID delta files (missed a file) (Elliot West via Eugene Koifman)

Posted by jx...@apache.org.
HIVE-12202 NPE thrown when reading legacy ACID delta files (missed a file) (Elliot West via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/50177ef6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/50177ef6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/50177ef6

Branch: refs/heads/master-fixed
Commit: 50177ef69486730c10ee9460870eefe51050826b
Parents: 6577f55
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Tue Nov 3 10:46:36 2015 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Tue Nov 3 10:46:36 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/TestAcidInputFormat.java  | 88 ++++++++++++++++++++
 1 file changed, 88 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/50177ef6/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java
new file mode 100644
index 0000000..6a77670
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java
@@ -0,0 +1,88 @@
+package org.apache.hadoop.hive.ql.io;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.DataInput;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.io.AcidInputFormat.DeltaMetaData;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class TestAcidInputFormat {
+
+  @Mock
+  private DataInput mockDataInput;
+
+  @Test
+  public void testDeltaMetaDataReadFieldsNoStatementIds() throws Exception {
+    when(mockDataInput.readLong()).thenReturn(1L, 2L);
+    when(mockDataInput.readInt()).thenReturn(0);
+
+    DeltaMetaData deltaMetaData = new AcidInputFormat.DeltaMetaData();
+    deltaMetaData.readFields(mockDataInput);
+
+    verify(mockDataInput, times(1)).readInt();
+    assertThat(deltaMetaData.getMinTxnId(), is(1L));
+    assertThat(deltaMetaData.getMaxTxnId(), is(2L));
+    assertThat(deltaMetaData.getStmtIds().isEmpty(), is(true));
+  }
+
+  @Test
+  public void testDeltaMetaDataReadFieldsWithStatementIds() throws Exception {
+    when(mockDataInput.readLong()).thenReturn(1L, 2L);
+    when(mockDataInput.readInt()).thenReturn(2, 100, 101);
+
+    DeltaMetaData deltaMetaData = new AcidInputFormat.DeltaMetaData();
+    deltaMetaData.readFields(mockDataInput);
+
+    verify(mockDataInput, times(3)).readInt();
+    assertThat(deltaMetaData.getMinTxnId(), is(1L));
+    assertThat(deltaMetaData.getMaxTxnId(), is(2L));
+    assertThat(deltaMetaData.getStmtIds().size(), is(2));
+    assertThat(deltaMetaData.getStmtIds().get(0), is(100));
+    assertThat(deltaMetaData.getStmtIds().get(1), is(101));
+  }
+
+  @Test
+  public void testDeltaMetaConstructWithState() throws Exception {
+    DeltaMetaData deltaMetaData = new AcidInputFormat.DeltaMetaData(2000L, 2001L, Arrays.asList(97, 98, 99));
+
+    assertThat(deltaMetaData.getMinTxnId(), is(2000L));
+    assertThat(deltaMetaData.getMaxTxnId(), is(2001L));
+    assertThat(deltaMetaData.getStmtIds().size(), is(3));
+    assertThat(deltaMetaData.getStmtIds().get(0), is(97));
+    assertThat(deltaMetaData.getStmtIds().get(1), is(98));
+    assertThat(deltaMetaData.getStmtIds().get(2), is(99));
+  }
+
+  @Test
+  public void testDeltaMetaDataReadFieldsWithStatementIdsResetsState() throws Exception {
+    when(mockDataInput.readLong()).thenReturn(1L, 2L);
+    when(mockDataInput.readInt()).thenReturn(2, 100, 101);
+
+    List<Integer> statementIds = new ArrayList<>();
+    statementIds.add(97);
+    statementIds.add(98);
+    statementIds.add(99);
+    DeltaMetaData deltaMetaData = new AcidInputFormat.DeltaMetaData(2000L, 2001L, statementIds);
+    deltaMetaData.readFields(mockDataInput);
+
+    verify(mockDataInput, times(3)).readInt();
+    assertThat(deltaMetaData.getMinTxnId(), is(1L));
+    assertThat(deltaMetaData.getMaxTxnId(), is(2L));
+    assertThat(deltaMetaData.getStmtIds().size(), is(2));
+    assertThat(deltaMetaData.getStmtIds().get(0), is(100));
+    assertThat(deltaMetaData.getStmtIds().get(1), is(101));
+  }
+
+}
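
Editorial note: the readFields contract exercised by this test follows the usual Hadoop Writable shape — min/max transaction ids as longs, then a statement-id count and that many ints. A round-trip sketch under one stated assumption: that DeltaMetaData also exposes the conventional write(DataOutput) half of the Writable pair, which this diff does not show.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.util.Arrays;

    import org.apache.hadoop.hive.ql.io.AcidInputFormat.DeltaMetaData;

    public class DeltaMetaDataRoundTrip {
      public static void main(String[] args) throws Exception {
        DeltaMetaData original = new DeltaMetaData(2000L, 2001L, Arrays.asList(97, 98, 99));

        // Serialize (assumes the Writable-style write(DataOutput) counterpart exists).
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh instance; readFields must fully replace prior state,
        // which is what testDeltaMetaDataReadFieldsWithStatementIdsResetsState verifies.
        DeltaMetaData copy = new DeltaMetaData();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        assert copy.getMinTxnId() == 2000L;
        assert copy.getMaxTxnId() == 2001L;
        assert copy.getStmtIds().equals(Arrays.asList(97, 98, 99));
      }
    }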


[12/55] [abbrv] hive git commit: HIVE-12171 : LLAP: BuddyAllocator failures when querying uncompressed data (Sergey Shelukhin, reviewed by Gopal V)

Posted by jx...@apache.org.
HIVE-12171 : LLAP: BuddyAllocator failures when querying uncompressed data (Sergey Shelukhin, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cdbd1c85
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cdbd1c85
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cdbd1c85

Branch: refs/heads/master-fixed
Commit: cdbd1c8517e70614ec9dfd0bfdc978b200a946c2
Parents: a46005c
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Nov 2 13:16:34 2015 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Nov 2 13:16:34 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   7 +-
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |  89 +++++++++------
 .../llap/cache/LowLevelCacheMemoryManager.java  |  12 ++
 .../hadoop/hive/llap/cache/MemoryManager.java   |   1 +
 .../hive/llap/cache/TestBuddyAllocator.java     |   6 +-
 .../hive/llap/cache/TestOrcMetadataCache.java   |   4 +
 .../ql/io/orc/encoded/EncodedReaderImpl.java    | 109 ++++++++++---------
 7 files changed, 144 insertions(+), 84 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 5198bb5..3ab73ad 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2308,9 +2308,10 @@ public class HiveConf extends Configuration {
     LLAP_ORC_CACHE_MAX_ALLOC("hive.llap.io.cache.orc.alloc.max", 16 * 1024 * 1024,
         "Maximum allocation possible from LLAP low-level cache for ORC. Should be as large as\n" +
         "the largest expected ORC compression buffer size. Must be power of 2."),
-    LLAP_ORC_CACHE_ARENA_SIZE("hive.llap.io.cache.orc.arena.size", 128 * 1024 * 1024,
-        "Arena size for ORC low-level cache; cache will be allocated in arena-sized steps.\n" +
-        "Must presently be a power of two."),
+    LLAP_ORC_CACHE_ARENA_COUNT("hive.llap.io.cache.orc.arena.count", 8,
+        "Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
+        "(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
+        "not the case, an adjusted size will be used. Using powers of 2 is recommended."),
     LLAP_ORC_CACHE_MAX_SIZE("hive.llap.io.cache.orc.size", 1024L * 1024 * 1024,
         "Maximum size for ORC low-level cache; must be a multiple of arena size."),
     LLAP_ORC_CACHE_ALLOCATE_DIRECT("hive.llap.io.cache.direct", true,
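
Editorial note: the net effect of this config change is that the arena size is now derived rather than set directly. A standalone distillation of the derivation as the BuddyAllocator patch below implements it (constant names mirror the diff; this is a sketch, not the shipped code):

    public class ArenaSizeDemo {
      // 1Gb boundary from the patch; guaranteed to fit any maximum allocation.
      private static final int MAX_ARENA_SIZE = 1024 * 1024 * 1024;

      static int deriveArenaSize(long maxSize, int arenaCount, int maxAllocation) {
        int arenaSize = (arenaCount == 0) ? MAX_ARENA_SIZE : (int) (maxSize / arenaCount);
        // Clamp into [maxAllocation, MAX_ARENA_SIZE].
        arenaSize = Math.max(maxAllocation, Math.min(arenaSize, MAX_ARENA_SIZE));
        // Round down so each arena divides evenly into maximum-sized allocations.
        if (arenaSize % maxAllocation > 0) {
          arenaSize = (arenaSize / maxAllocation) * maxAllocation;
        }
        return arenaSize;
      }

      public static void main(String[] args) {
        // Defaults from the diff: 1Gb cache, 8 arenas, 16Mb max alloc -> 128Mb arenas.
        System.out.println(deriveArenaSize(1024L * 1024 * 1024, 8, 16 * 1024 * 1024));
      }
    }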

http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index 2aca68d..485a145 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -40,33 +40,43 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
   private final long maxSize;
   private final boolean isDirect;
   private final LlapDaemonCacheMetrics metrics;
-
+  
+  // We don't know the acceptable size for Java array, so we'll use 1Gb boundary.
+  // That is guaranteed to fit any maximum allocation.
+  private static final int MAX_ARENA_SIZE = 1024*1024*1024;
   public BuddyAllocator(Configuration conf, MemoryManager memoryManager,
       LlapDaemonCacheMetrics metrics) {
     isDirect = HiveConf.getBoolVar(conf, ConfVars.LLAP_ORC_CACHE_ALLOCATE_DIRECT);
     minAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_MIN_ALLOC);
     maxAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_ALLOC);
-    arenaSize = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_ARENA_SIZE);
+    int arenaCount = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_ARENA_COUNT);
     long maxSizeVal = HiveConf.getLongVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_SIZE);
-    if (LlapIoImpl.LOGL.isInfoEnabled()) {
+    int arenaSizeVal = (arenaCount == 0) ? MAX_ARENA_SIZE : (int)(maxSizeVal / arenaCount);
+    arenaSizeVal = Math.max(maxAllocation, Math.min(arenaSizeVal, MAX_ARENA_SIZE));
+    if (LlapIoImpl.LOG.isInfoEnabled()) {
       LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte")
           + " buffers; allocation sizes " + minAllocation + " - " + maxAllocation
-          + ", arena size " + arenaSize + ". total size " + maxSizeVal);
+          + ", arena size " + arenaSizeVal + ". total size " + maxSizeVal);
     }
 
     if (minAllocation < 8) {
       throw new AssertionError("Min allocation must be at least 8: " + minAllocation);
     }
-    if (maxSizeVal < arenaSize || arenaSize < maxAllocation || maxAllocation < minAllocation) {
+    if (maxSizeVal < arenaSizeVal || maxAllocation < minAllocation) {
       throw new AssertionError("Inconsistent sizes of cache, arena and allocations: "
-          + minAllocation + ", " + maxAllocation + ", " + arenaSize + ", " + maxSizeVal);
+          + minAllocation + ", " + maxAllocation + ", " + arenaSizeVal + ", " + maxSizeVal);
+    }
+    if ((Integer.bitCount(minAllocation) != 1) || (Integer.bitCount(maxAllocation) != 1)) {
+      throw new AssertionError("Allocation sizes must be powers of two: "
+          + minAllocation + ", " + maxAllocation);
     }
-    if ((Integer.bitCount(minAllocation) != 1) || (Integer.bitCount(maxAllocation) != 1)
-        || (Long.bitCount(arenaSize) != 1)) {
-      // Technically, arena size only needs to be divisible by maxAlloc
-      throw new AssertionError("Allocation and arena sizes must be powers of two: "
-          + minAllocation + ", " + maxAllocation + ", " + arenaSize);
+    if ((arenaSizeVal % maxAllocation) > 0) {
+      long oldArenaSize = arenaSizeVal;
+      arenaSizeVal = (arenaSizeVal / maxAllocation) * maxAllocation;
+      LlapIoImpl.LOG.warn("Rounding arena size to " + arenaSizeVal + " from " + oldArenaSize
+          + " to be divisible by allocation size " + maxAllocation);
     }
+    arenaSize = arenaSizeVal;
     if ((maxSizeVal % arenaSize) > 0) {
       long oldMaxSize = maxSizeVal;
       maxSizeVal = (maxSizeVal / arenaSize) * arenaSize;
@@ -111,7 +121,7 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
     // TODO: reserving the entire thing is not ideal before we alloc anything. Interleave?
     memoryManager.reserveMemory(dest.length << allocLog2, true);
 
-    int ix = 0;
+    int destAllocIx = 0;
     for (int i = 0; i < dest.length; ++i) {
       if (dest[i] != null) continue;
       dest[i] = createUnallocated(); // TODO: pool of objects?
@@ -123,22 +133,29 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
     }
     long threadId = arenaCount > 1 ? Thread.currentThread().getId() : 0;
     {
-      int startIndex = (int)(threadId % arenaCount), index = startIndex;
+      int startArenaIx = (int)(threadId % arenaCount), index = startArenaIx;
       do {
-        int newIx = arenas[index].allocateFast(index, freeListIx, dest, ix, allocationSize);
-        if (newIx == dest.length) return;
-        if (newIx != -1) {  // TODO: check if it can still happen; count should take care of this.
-          ix = newIx;
-        }
-        ix = newIx;
+        int newDestIx = arenas[index].allocateFast(
+            index, freeListIx, dest, destAllocIx, allocationSize);
+        if (newDestIx == dest.length) return;
+        assert newDestIx != -1;
+        destAllocIx = newDestIx;
         if ((++index) == arenaCount) {
           index = 0;
         }
-      } while (index != startIndex);
+      } while (index != startArenaIx);
     }
 
-    // TODO: this is very hacky.
-    // We called reserveMemory so we know that somewhere in there, there's memory waiting for us.
+    // 1) We can get fragmented on large blocks of uncompressed data. The memory might be
+    // in there, but it might be in separate small blocks. This is a complicated problem, and
+    // several solutions (in order of decreasing ugliness and increasing complexity) are: just
+    // ask to evict the exact-sized block (there may be no such block), evict from a particular
+    // arena (policy would know allocator internals somewhat), store buffer mapping and ask to
+    // evict from specific choice of blocks next to each other or next to already-evicted block,
+    // and finally do a compaction (requires a block mapping and complex sync). For now we'd just
+    // force-evict some memory and avoid both complexity and ugliness, since large blocks are rare.
+    // 2) Fragmentation aside (TODO: and this is a very hacky solution for that),
+    // we called reserveMemory so we know that there's memory waiting for us somewhere.
     // However, we have a class of rare race conditions related to the order of locking/checking of
     // different allocation areas. Simple case - say we have 2 arenas, 256Kb available in arena 2.
     // We look at arena 1; someone deallocs 256Kb from arena 1 and allocs the same from arena 2;
@@ -155,22 +172,32 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
     // But for now we will just retry 5 times 0_o
     for (int attempt = 0; attempt < 5; ++attempt) {
       // Try to split bigger blocks. TODO: again, ideally we would tryLock at least once
-      for (int i = 0; i < arenaCount; ++i) {
-        int newIx = arenas[i].allocateWithSplit(i, freeListIx, dest, ix, allocationSize);
-        if (newIx == -1) break; // Shouldn't happen.
-        if (newIx == dest.length) return;
-        ix = newIx;
+      {
+        int startArenaIx = (int)((threadId + attempt) % arenaCount), arenaIx = startArenaIx;
+        do {
+          int newDestIx = arenas[arenaIx].allocateWithSplit(
+              arenaIx, freeListIx, dest, destAllocIx, allocationSize);
+          if (newDestIx == dest.length) return;
+          assert newDestIx != -1;
+          destAllocIx = newDestIx;
+          if ((++arenaIx) == arenaCount) {
+            arenaIx = 0;
+          }
+        } while (arenaIx != startArenaIx);
       }
+
       if (attempt == 0) {
         // Try to allocate memory if we haven't allocated all the way to maxSize yet; very rare.
-        for (int i = arenaCount; i < arenas.length; ++i) {
-          ix = arenas[i].allocateWithExpand(i, freeListIx, dest, ix, allocationSize);
-          if (ix == dest.length) return;
+        for (int arenaIx = arenaCount; arenaIx < arenas.length; ++arenaIx) {
+          destAllocIx = arenas[arenaIx].allocateWithExpand(
+              arenaIx, freeListIx, dest, destAllocIx, allocationSize);
+          if (destAllocIx == dest.length) return;
         }
       }
+      memoryManager.forceReservedMemory(allocationSize * (dest.length - destAllocIx));
       LlapIoImpl.LOG.warn("Failed to allocate despite reserved memory; will retry " + attempt);
     }
-    String msg = "Failed to allocate " + size + "; at " + ix + " out of " + dest.length;
+    String msg = "Failed to allocate " + size + "; at " + destAllocIx + " out of " + dest.length;
     LlapIoImpl.LOG.error(msg + "\nALLOCATOR STATE:\n" + debugDump()
         + "\nPARENT STATE:\n" + memoryManager.debugDumpForOom());
     throw new AllocatorOutOfMemoryException(msg);
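
In the BuddyAllocator change above, the arena size is no longer configured directly; it is derived from LLAP_ORC_CACHE_ARENA_COUNT. A minimal standalone sketch of the derivation, with illustrative names (the constructor above is authoritative):

    // Sketch: derive the arena size from the configured arena count,
    // mirroring the patched BuddyAllocator constructor.
    static int deriveArenaSize(int arenaCount, long maxSizeVal, int maxAllocation) {
      final int MAX_ARENA_SIZE = 1024 * 1024 * 1024; // 1GB cap per arena
      int arenaSize = (arenaCount == 0) ? MAX_ARENA_SIZE : (int) (maxSizeVal / arenaCount);
      // Clamp between one maximum allocation and the 1GB cap.
      arenaSize = Math.max(maxAllocation, Math.min(arenaSize, MAX_ARENA_SIZE));
      // Round down so each arena holds a whole number of max-sized allocations.
      return arenaSize - (arenaSize % maxAllocation);
    }

For example, a 4GB cache with arena count 8 and a 16MB maximum allocation yields 512MB arenas, matching the "arena size" figure the constructor logs. Allocation then probes arenas round-robin starting at (threadId % arenaCount), so concurrent threads tend to start their search in different arenas.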

http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
index 4a256ee..d584ca8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
@@ -71,6 +71,8 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
           try {
             Thread.sleep(Math.min(1000, nextLog));
           } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            return false;
           }
         }
         continue;
@@ -90,6 +92,16 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
     return true;
   }
 
+
+  @Override
+  public void forceReservedMemory(int memoryToEvict) {
+    while (memoryToEvict > 0) {
+      long evicted = evictor.evictSomeBlocks(memoryToEvict);
+      if (evicted == 0) return;
+      memoryToEvict -= evicted;
+    }
+  }
+
   @Override
   public void releaseMemory(long memoryToRelease) {
     long oldV;
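
Two behavioral points in the LowLevelCacheMemoryManager change above are easy to miss. First, reserveMemory no longer swallows InterruptedException: it restores the thread's interrupt flag and returns false so the caller can unwind. A generic standalone sketch of that idiom (illustrative, not Hive code):

    // Restore the interrupt flag instead of swallowing the exception, so
    // callers and thread pools can still observe the interruption.
    static boolean sleepQuietly(long millis) {
      try {
        Thread.sleep(millis);
        return true;
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve interrupt status
        return false;                       // tell the caller to stop retrying
      }
    }

Second, the new forceReservedMemory loop exits as soon as evictSomeBlocks returns 0, so a depleted eviction policy cannot spin the loop forever.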

http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
index e1b0cb4..6cc262e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
@@ -22,4 +22,5 @@ public interface MemoryManager extends LlapOomDebugDump {
   boolean reserveMemory(long memoryToReserve, boolean waitForEviction);
   void releaseMemory(long memUsage);
   void updateMaxSize(long maxSize);
+  void forceReservedMemory(int memoryToEvict);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
index 6d21997..6375996 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
@@ -58,6 +58,10 @@ public class TestBuddyAllocator {
     @Override
     public void updateMaxSize(long maxSize) {
     }
+
+    @Override
+    public void forceReservedMemory(int memoryToEvict) {
+    }
   }
 
   @Test
@@ -280,7 +284,7 @@ public class TestBuddyAllocator {
     Configuration conf = new Configuration();
     conf.setInt(ConfVars.LLAP_ORC_CACHE_MIN_ALLOC.varname, min);
     conf.setInt(ConfVars.LLAP_ORC_CACHE_MAX_ALLOC.varname, max);
-    conf.setInt(ConfVars.LLAP_ORC_CACHE_ARENA_SIZE.varname, arena);
+    conf.setInt(ConfVars.LLAP_ORC_CACHE_ARENA_COUNT.varname, total/arena);
     conf.setLong(ConfVars.LLAP_ORC_CACHE_MAX_SIZE.varname, total);
     return conf;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
index b886d77..901e58a 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
@@ -78,6 +78,10 @@ public class TestOrcMetadataCache {
     @Override
     public void updateMaxSize(long maxSize) {
     }
+
+    @Override
+    public void forceReservedMemory(int memoryToEvict) {
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index e0c0743..f789a4f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamD
 import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
 import org.apache.hadoop.hive.ql.io.orc.CompressionCodec;
 import org.apache.hadoop.hive.ql.io.orc.DataReader;
+import org.apache.hadoop.hive.ql.io.orc.OrcConf;
 import org.apache.hadoop.hive.ql.io.orc.OrcProto;
 import org.apache.hadoop.hive.ql.io.orc.OutStream;
 import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils;
@@ -751,7 +752,7 @@ class EncodedReaderImpl implements EncodedReader {
 
   /**
    * To achieve some sort of consistent cache boundaries, we will cache streams deterministically;
-   * in segments starting w/stream start, and going for either stream size or maximum allocation.
+   * in segments starting w/stream start, and going for either stream size or some fixed size.
    * If we are not reading the entire segment's worth of data, then we will not cache the partial
    * RGs; the breakage of cache assumptions (no interleaving blocks, etc.) is way too much PITA
    * to handle just for this case.
@@ -777,87 +778,87 @@ class EncodedReaderImpl implements EncodedReader {
     }
     // Account for maximum cache buffer size.
     long streamLen = streamEnd - streamOffset;
-    int partSize = cache.getAllocator().getMaxAllocation(),
-        partCount = (int)((streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0));
-    long partOffset = streamOffset, partEnd = Math.min(partOffset + partSize, streamEnd);
+    int partSize = determineUncompressedPartSize(),
+        partCount = (int)(streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0);
 
     CacheChunk lastUncompressed = null;
     MemoryBuffer[] singleAlloc = new MemoryBuffer[1];
     for (int i = 0; i < partCount; ++i) {
-      long hasEntirePartTo = -1;
-      if (partOffset == current.getOffset()) {
-        hasEntirePartTo = partOffset;
+      long partOffset = streamOffset + (i * partSize),
+           partEnd = Math.min(partOffset + partSize, streamEnd);
+      long hasEntirePartTo = partOffset; // We have 0 bytes of data for this part, for now.
+      assert partOffset <= current.getOffset();
+      if (partOffset == current.getOffset() && current instanceof CacheChunk) {
         // We assume cache chunks would always match the way we read, so check and skip it.
-        if (current instanceof CacheChunk) {
-          lastUncompressed = (CacheChunk)current;
-          assert current.getOffset() == partOffset && current.getEnd() == partEnd;
-          partOffset = partEnd;
-          partEnd = Math.min(partOffset + partSize, streamEnd);
-          continue;
-        }
+        assert current.getOffset() == partOffset && current.getEnd() == partEnd;
+        lastUncompressed = (CacheChunk)current;
+        current = current.next;
+        continue;
       }
       if (current.getOffset() >= partEnd) {
-        // We have no data at all for this part of the stream (could be unneeded), skip.
-        partOffset = partEnd;
-        partEnd = Math.min(partOffset + partSize, streamEnd);
-        continue;
+        continue; // We have no data at all for this part of the stream (could be unneeded), skip.
       }
       if (toRelease == null && dataReader.isTrackingDiskRanges()) {
         toRelease = new ArrayList<ByteBuffer>();
       }
       // We have some disk buffers... see if we have entire part, etc.
-      UncompressedCacheChunk candidateCached = null;
+      UncompressedCacheChunk candidateCached = null; // We will cache if we have the entire part.
       DiskRangeList next = current;
       while (true) {
-        if (next == null || next.getOffset() >= partEnd) {
-          if (hasEntirePartTo < partEnd && candidateCached != null) {
-            // We are missing a section at the end of the part...
-            lastUncompressed = copyAndReplaceCandidateToNonCached(
-                candidateCached, partOffset, hasEntirePartTo, cache, singleAlloc);
-            candidateCached = null;
-          }
-          break;
+        boolean noMoreDataForPart = (next == null || next.getOffset() >= partEnd);
+        if (noMoreDataForPart && hasEntirePartTo < partEnd && candidateCached != null) {
+          // We are missing a section at the end of the part... copy the start to non-cached.
+          lastUncompressed = copyAndReplaceCandidateToNonCached(
+              candidateCached, partOffset, hasEntirePartTo, cache, singleAlloc);
+          candidateCached = null;
         }
         current = next;
-        boolean wasSplit = (current.getEnd() > partEnd);
-        if (wasSplit) {
+        if (noMoreDataForPart) break; // Done with this part.
+
+        boolean wasSplit = false;
+        if (current.getEnd() > partEnd) {
+          // If the current buffer contains multiple parts, split it.
           current = current.split(partEnd);
+          wasSplit = true;
         }
         if (isDebugTracingEnabled) {
           LOG.info("Processing uncompressed file data at ["
               + current.getOffset() + ", " + current.getEnd() + ")");
         }
-        BufferChunk bc = (BufferChunk)current;
+        BufferChunk curBc = (BufferChunk)current;
         if (!wasSplit && toRelease != null) {
-          toRelease.add(bc.getChunk()); // TODO: is it valid to give zcr the modified 2nd part?
+          toRelease.add(curBc.getChunk()); // TODO: is it valid to give zcr the modified 2nd part?
         }
 
         // Track if we still have the entire part.
         long hadEntirePartTo = hasEntirePartTo;
-        if (hasEntirePartTo != -1) {
-          hasEntirePartTo = (hasEntirePartTo == current.getOffset()) ? current.getEnd() : -1;
-        }
-        if (candidateCached != null && hasEntirePartTo == -1) {
-          lastUncompressed = copyAndReplaceCandidateToNonCached(
-              candidateCached, partOffset, hadEntirePartTo, cache, singleAlloc);
-          candidateCached = null;
-        }
-
-        if (hasEntirePartTo != -1) {
+        // We have data until the end of current block if we had it until the beginning.
+        hasEntirePartTo = (hasEntirePartTo == current.getOffset()) ? current.getEnd() : -1;
+        if (hasEntirePartTo == -1) {
+          // We don't have the entire part; copy both whatever we intended to cache, and the rest,
+          // to an allocated buffer. We could try to optimize a bit if we have contiguous buffers
+          // with gaps, but it's probably not needed.
+          if (candidateCached != null) {
+            assert hadEntirePartTo != -1;
+            copyAndReplaceCandidateToNonCached(
+                candidateCached, partOffset, hadEntirePartTo, cache, singleAlloc);
+            candidateCached = null;
+          }
+          lastUncompressed = copyAndReplaceUncompressedToNonCached(curBc, cache, singleAlloc);
+          next = lastUncompressed.next; // There may be more data after the gap.
+        } else {
           // So far we have all the data from the beginning of the part.
           if (candidateCached == null) {
-            candidateCached = new UncompressedCacheChunk(bc);
+            candidateCached = new UncompressedCacheChunk(curBc);
           } else {
-            candidateCached.addChunk(bc);
+            candidateCached.addChunk(curBc);
           }
-          // We will take care of this at the end of the part, or if we find a gap.
           next = current.next;
-          continue;
         }
-        // We don't have the entire part; just copy to an allocated buffer. We could try to
-        // optimize a bit if we have contiguous buffers with gaps, but it's probably not needed.
-        lastUncompressed = copyAndReplaceUncompressedToNonCached(bc, cache, singleAlloc);
-        next = lastUncompressed.next;
       }
       if (candidateCached != null) {
         if (toCache == null) {
@@ -908,6 +909,16 @@ class EncodedReaderImpl implements EncodedReader {
     return lastUncompressed;
   }
 
+
+  private int determineUncompressedPartSize() {
+    // We will break the uncompressed data in the cache in the chunks that are the size
+    // of the prevalent ORC compression buffer (the default), or maximum allocation (since we
+    // cannot allocate bigger chunks), whichever is less.
+    long orcCbSizeDefault = ((Number)OrcConf.BUFFER_SIZE.getDefaultValue()).longValue();
+    int maxAllocSize = cache.getAllocator().getMaxAllocation();
+    return (int)Math.min(maxAllocSize, orcCbSizeDefault);
+  }
+
   private static void copyUncompressedChunk(ByteBuffer src, ByteBuffer dest) {
     int startPos = dest.position(), startLim = dest.limit();
     dest.put(src); // Copy uncompressed data to cache.
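
The new determineUncompressedPartSize caps each cached segment at the smaller of the default ORC compression buffer size and the allocator's maximum allocation; the caller then derives the part count by ceiling division. A standalone sketch of that math, with illustrative names:

    // partCount = ceil(streamLen / partSize), written without floating point;
    // equivalent to the (streamLen / partSize) + remainder-check form above.
    static int countParts(long streamLen, int maxAllocSize, long orcCbSizeDefault) {
      int partSize = (int) Math.min(maxAllocSize, orcCbSizeDefault);
      return (int) ((streamLen + partSize - 1) / partSize);
    }

For instance, a 700KB stream split into 256KB parts caches as three parts: two full and one partial.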


[35/55] [abbrv] hive git commit: HIVE-12327: WebHCat e2e tests TestJob_1 and TestJob_2 fail (Daniel Dai, reviewed by Thejas Nair)

Posted by jx...@apache.org.
HIVE-12327: WebHCat e2e tests TestJob_1 and TestJob_2 fail (Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6eaad6be
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6eaad6be
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6eaad6be

Branch: refs/heads/master-fixed
Commit: 6eaad6bea6513ec6005b152d59d51d8a6d744efc
Parents: 3228ba7
Author: Daniel Dai <da...@hortonworks.com>
Authored: Tue Nov 3 21:29:46 2015 -0800
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Tue Nov 3 21:29:46 2015 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6eaad6be/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java b/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
index 288043f..b020ffe 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java
@@ -41,6 +41,7 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.regex.Pattern;
 
 public class WebHCatJTShim23 implements WebHCatJTShim {
   private static final Logger LOG = LoggerFactory.getLogger(WebHCatJTShim23.class);
@@ -139,7 +140,8 @@ public class WebHCatJTShim23 implements WebHCatJTShim {
     }
     catch(IOException ex) {
       String msg = ex.getMessage();
-      if(msg != null && msg.contains("ApplicationNotFoundException")) {
+      if(msg != null && (msg.contains("ApplicationNotFoundException") ||
+          Pattern.compile("History file.*not found").matcher(msg).find())) {
         LOG.info("Job(" + jobid + ") not found: " + msg);
         return null;
       }
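
The shim previously recognized only ApplicationNotFoundException; it now also treats a missing job-history file as "job not found". Note that Matcher.find() searches for the pattern anywhere in the message, where matches() would require the whole string to match and miss real JobHistory errors. A standalone form of the check (the example message below is hypothetical):

    import java.util.regex.Pattern;

    static boolean isJobMissing(String msg) {
      return msg != null && (msg.contains("ApplicationNotFoundException")
          || Pattern.compile("History file.*not found").matcher(msg).find());
    }

    // isJobMissing("java.io.IOException: History file job_1.jhist not found")
    // returns true even though the pattern covers only part of the message.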


[49/55] [abbrv] hive git commit: HIVE-12207 : Query fails when non-ascii characters are used in string literals (Aleksei Statkevich via Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12207 : Query fails when non-ascii characters are used in string literals (Aleksei Statkevich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e1b3b3f5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e1b3b3f5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e1b3b3f5

Branch: refs/heads/master-fixed
Commit: e1b3b3f51206f9505439d7fb1efcecdb53903803
Parents: e8c8a33
Author: Aleksei Statkevich <me...@gmail.com>
Authored: Sat Oct 17 23:37:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Nov 5 14:16:31 2015 -0800

----------------------------------------------------------------------
 .../calcite/translator/RexNodeConverter.java    | 13 ++++++++---
 .../queries/clientpositive/non_ascii_literal1.q |  1 +
 .../queries/clientpositive/non_ascii_literal2.q |  5 +++++
 .../clientpositive/non_ascii_literal1.q.out     |  9 ++++++++
 .../clientpositive/non_ascii_literal2.q.out     | 23 ++++++++++++++++++++
 5 files changed, 48 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e1b3b3f5/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index d315497..631a4ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -38,11 +38,14 @@ import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlCollation;
 import org.apache.calcite.sql.SqlIntervalQualifier;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlCastFunction;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ConversionUtil;
+import org.apache.calcite.util.NlsString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.type.Decimal128;
@@ -301,6 +304,10 @@ public class RexNodeConverter {
   private static final BigInteger MIN_LONG_BI = BigInteger.valueOf(Long.MIN_VALUE),
       MAX_LONG_BI = BigInteger.valueOf(Long.MAX_VALUE);
 
+  private static NlsString asUnicodeString(String text) {
+    return new NlsString(text, ConversionUtil.NATIVE_UTF16_CHARSET_NAME, SqlCollation.IMPLICIT);
+  }
+
   protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticException {
     RexBuilder rexBuilder = cluster.getRexBuilder();
     RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
@@ -377,16 +384,16 @@ public class RexNodeConverter {
       if (value instanceof HiveChar) {
         value = ((HiveChar) value).getValue();
       }
-      calciteLiteral = rexBuilder.makeLiteral((String) value);
+      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case VARCHAR:
       if (value instanceof HiveVarchar) {
         value = ((HiveVarchar) value).getValue();
       }
-      calciteLiteral = rexBuilder.makeLiteral((String) value);
+      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case STRING:
-      calciteLiteral = rexBuilder.makeLiteral((String) value);
+      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case DATE:
       Calendar cal = new GregorianCalendar();
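
The root cause is that RexBuilder.makeLiteral(String) builds string literals in Calcite's default literal charset (historically ISO-8859-1), which cannot represent the characters used in the new tests below; makeCharLiteral with an explicitly UTF-16-backed NlsString sidesteps that. A pure-JDK illustration of the charset limitation (no Calcite required):

    import java.nio.charset.Charset;

    public class CharsetCheck {
      public static void main(String[] args) {
        Charset latin1 = Charset.forName("ISO-8859-1");
        System.out.println(latin1.newEncoder().canEncode("谢谢"));    // false
        System.out.println(latin1.newEncoder().canEncode("Абвгде")); // false
        System.out.println(Charset.forName("UTF-16")
            .newEncoder().canEncode("Абвгде"));                      // true
      }
    }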

http://git-wip-us.apache.org/repos/asf/hive/blob/e1b3b3f5/ql/src/test/queries/clientpositive/non_ascii_literal1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/non_ascii_literal1.q b/ql/src/test/queries/clientpositive/non_ascii_literal1.q
new file mode 100644
index 0000000..9573653
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/non_ascii_literal1.q
@@ -0,0 +1 @@
+select concat("Абвгде", "谢谢") from src limit 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/e1b3b3f5/ql/src/test/queries/clientpositive/non_ascii_literal2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/non_ascii_literal2.q b/ql/src/test/queries/clientpositive/non_ascii_literal2.q
new file mode 100644
index 0000000..6b25273
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/non_ascii_literal2.q
@@ -0,0 +1,5 @@
+create table non_ascii_literal2 as
+select "谢谢" as col1, "Абвгде" as col2;
+
+select * from non_ascii_literal2
+where col2 = "Абвгде";

http://git-wip-us.apache.org/repos/asf/hive/blob/e1b3b3f5/ql/src/test/results/clientpositive/non_ascii_literal1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/non_ascii_literal1.q.out b/ql/src/test/results/clientpositive/non_ascii_literal1.q.out
new file mode 100644
index 0000000..5b28f4e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/non_ascii_literal1.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: select concat("Абвгде", "谢谢") from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select concat("Абвгде", "谢谢") from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+Абвгде谢谢

http://git-wip-us.apache.org/repos/asf/hive/blob/e1b3b3f5/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/non_ascii_literal2.q.out b/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
new file mode 100644
index 0000000..7e19143
--- /dev/null
+++ b/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
@@ -0,0 +1,23 @@
+PREHOOK: query: create table non_ascii_literal2 as
+select "谢谢" as col1, "Абвгде" as col2
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@non_ascii_literal2
+POSTHOOK: query: create table non_ascii_literal2 as
+select "谢谢" as col1, "Абвгде" as col2
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@non_ascii_literal2
+PREHOOK: query: select * from non_ascii_literal2
+where col2 = "Абвгде"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@non_ascii_literal2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from non_ascii_literal2
+where col2 = "Абвгде"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@non_ascii_literal2
+#### A masked pattern was here ####
+谢谢	Абвгде


[44/55] [abbrv] hive git commit: HIVE-12252 Streaming API HiveEndPoint can be created w/o partitionVals for partitioned table (Wei Zheng via Eugene Koifman)

Posted by jx...@apache.org.
HIVE-12252 Streaming API HiveEndPoint can be created w/o partitionVals for partitioned table (Wei Zheng via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3511df74
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3511df74
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3511df74

Branch: refs/heads/master-fixed
Commit: 3511df74d0ba982a747046881986873f9b1f872d
Parents: dff2538
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Thu Nov 5 10:07:30 2015 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Thu Nov 5 10:07:30 2015 -0800

----------------------------------------------------------------------
 .../hcatalog/streaming/ConnectionError.java     |  4 ++
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 51 +++++++++++++++-----
 .../hive/hcatalog/streaming/TestStreaming.java  | 35 +++++++++++---
 3 files changed, 71 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3511df74/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
index 1aeef76..ffa51c9 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
@@ -20,6 +20,10 @@ package org.apache.hive.hcatalog.streaming;
 
 public class ConnectionError extends StreamingException {
 
+  public ConnectionError(String msg) {
+    super(msg);
+  }
+
   public ConnectionError(String msg, Exception innerEx) {
     super(msg, innerEx);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/3511df74/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 306c93d..2f2d44a 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -279,23 +279,48 @@ public class HiveEndPoint {
       }
     }
 
-    private void checkEndPoint(HiveEndPoint endPoint, IMetaStoreClient msClient) throws InvalidTable {
-      // 1 - check if TBLPROPERTIES ('transactional'='true') is set on table
+    /**
+     * Checks the validity of the endpoint
+     * @param endPoint the HiveEndPoint to be checked
+     * @param msClient the metastore client
+     * @throws InvalidTable
+     */
+    private void checkEndPoint(HiveEndPoint endPoint, IMetaStoreClient msClient)
+        throws InvalidTable, ConnectionError {
+      Table t;
       try {
-        Table t = msClient.getTable(endPoint.database, endPoint.table);
-        Map<String, String> params = t.getParameters();
-        if(params != null) {
-          String transactionalProp = params.get("transactional");
-          if (transactionalProp != null && transactionalProp.equalsIgnoreCase("true")) {
-            return;
-          }
-        }
-        LOG.error("'transactional' property is not set on Table " + endPoint);
-        throw new InvalidTable(endPoint.database, endPoint.table, "\'transactional\' property is not set on Table");
+        t = msClient.getTable(endPoint.database, endPoint.table);
       } catch (Exception e) {
-        LOG.warn("Unable to check if Table is transactional. " + endPoint, e);
+        LOG.warn("Unable to check the endPoint: " + endPoint, e);
         throw new InvalidTable(endPoint.database, endPoint.table, e);
       }
+
+      // 1 - check if TBLPROPERTIES ('transactional'='true') is set on table
+      Map<String, String> params = t.getParameters();
+      if (params != null) {
+        String transactionalProp = params.get("transactional");
+        if (transactionalProp == null || !transactionalProp.equalsIgnoreCase("true")) {
+          LOG.error("'transactional' property is not set on Table " + endPoint);
+          throw new InvalidTable(endPoint.database, endPoint.table, "\'transactional\' property" +
+              " is not set on Table");          }
+      }
+
+      // 2 - check if partitionvals are legitimate
+      if (t.getPartitionKeys() != null && !t.getPartitionKeys().isEmpty()
+          && endPoint.partitionVals.isEmpty()) {
+        // Invalid if table is partitioned, but endPoint's partitionVals is empty
+        String errMsg = "HiveEndPoint " + endPoint + " doesn't specify any partitions for " +
+            "partitioned table";
+        LOG.error(errMsg);
+        throw new ConnectionError(errMsg);
+      }
+      if ((t.getPartitionKeys() == null || t.getPartitionKeys().isEmpty())
+          && !endPoint.partitionVals.isEmpty()) {
+        // Invalid if table is not partitioned, but endPoint's partitionVals is not empty
+        String errMsg = "HiveEndPoint" + endPoint + " specifies partitions for unpartitioned table";
+        LOG.error(errMsg);
+        throw new ConnectionError(errMsg);
+      }
     }
 
     /**

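checkEndPoint now rejects the two mismatched combinations symmetrically: a partitioned table with no partitionVals, and an unpartitioned table with partitionVals. A hedged usage sketch (metaStoreURI and the table names are placeholders; the TestStreaming changes below exercise the same paths):

    // Assumes metaStoreURI is defined and java.util.Arrays is imported.
    HiveEndPoint ok = new HiveEndPoint(metaStoreURI, "db", "part_tbl",
        Arrays.asList("2015", "11"));
    ok.newConnection(false, null).close();  // partitioned + vals: accepted

    HiveEndPoint noVals = new HiveEndPoint(metaStoreURI, "db", "part_tbl", null);
    noVals.newConnection(true);             // throws ConnectionError

    HiveEndPoint extraVals = new HiveEndPoint(metaStoreURI, "db", "unpart_tbl",
        Arrays.asList("2015", "11"));
    extraVals.newConnection(false);         // throws ConnectionError
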
http://git-wip-us.apache.org/repos/asf/hive/blob/3511df74/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index d9a7eae..58cfbaa 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -204,7 +204,7 @@ public class TestStreaming {
 
     dropDB(msClient, dbName2);
     String loc2 = dbFolder.newFolder(dbName2 + ".db").toString();
-    partLoc2 = createDbAndTable(driver, dbName2, tblName2, partitionVals, colNames, colTypes, bucketCols, partNames, loc2, 2);
+    partLoc2 = createDbAndTable(driver, dbName2, tblName2, null, colNames, colTypes, bucketCols, null, loc2, 2);
 
     String loc3 = dbFolder.newFolder("testing5.db").toString();
     createStoreSales("testing5", loc3);
@@ -477,15 +477,38 @@ public class TestStreaming {
 
   @Test
   public void testEndpointConnection() throws Exception {
-    // 1) Basic
-    HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName
-            , partitionVals);
+    // For partitioned table, partitionVals are specified
+    HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, partitionVals);
     StreamingConnection connection = endPt.newConnection(false, null); //shouldn't throw
     connection.close();
 
-    // 2) Leave partition unspecified
-    endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, null);
+    // For unpartitioned table, partitionVals are not specified
+    endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, null);
     endPt.newConnection(false, null).close(); // should not throw
+
+    // For partitioned table, partitionVals are not specified
+    try {
+      endPt = new HiveEndPoint(metaStoreURI, dbName, tblName, null);
+      connection = endPt.newConnection(true);
+      Assert.assertTrue("ConnectionError was not thrown", false);
+      connection.close();
+    } catch (ConnectionError e) {
+      // expecting this exception
+      String errMsg = "doesn't specify any partitions for partitioned table";
+      Assert.assertTrue(e.toString().endsWith(errMsg));
+    }
+
+    // For unpartitioned table, partition values are specified
+    try {
+      endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, partitionVals);
+      connection = endPt.newConnection(false);
+      Assert.assertTrue("ConnectionError was not thrown", false);
+      connection.close();
+    } catch (ConnectionError e) {
+      // expecting this exception
+      String errMsg = "specifies partitions for unpartitioned table";
+      Assert.assertTrue(e.toString().endsWith(errMsg));
+    }
   }
 
   @Test


[13/55] [abbrv] hive git commit: HIVE-12318 qtest failing due to NPE in logStats (Jimmy Xiang via gates)

Posted by jx...@apache.org.
HIVE-12318 qtest failing due to NPE in logStats (Jimmy Xiang via gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1ad1dc85
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1ad1dc85
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1ad1dc85

Branch: refs/heads/master-fixed
Commit: 1ad1dc8581ae3454237a2b232767e73ec6b74a83
Parents: cdbd1c8
Author: Alan Gates <ga...@hortonworks.com>
Authored: Mon Nov 2 15:40:25 2015 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Nov 2 15:40:25 2015 -0800

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1ad1dc85/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index e2630ad..9a86a35 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -896,7 +896,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     if (isLogInfoEnabled && !statsMap.isEmpty()) {
       StringBuilder sb = new StringBuilder();
       for (Map.Entry<String, LongWritable> e : statsMap.entrySet()) {
-        sb.append(e.getKey()).append(":").append(statsMap.get(e).toString()).append(", ");
+        sb.append(e.getKey()).append(":").append(e.getValue()).append(", ");
       }
       LOG.info(sb.toString());
     }
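
The bug was a lookup with the wrong key type: the loop passed the Map.Entry itself to statsMap.get(), which compiles because Map.get takes Object but always returns null (an entry is never a key), so the chained toString() threw the NPE. A minimal standalone reproduction:

    import java.util.HashMap;
    import java.util.Map;

    public class EntryLookupNpe {
      public static void main(String[] args) {
        Map<String, Long> statsMap = new HashMap<String, Long>();
        statsMap.put("RECORDS_OUT", 42L);
        for (Map.Entry<String, Long> e : statsMap.entrySet()) {
          System.out.println(statsMap.get(e)); // null: the entry is not a key
          System.out.println(e.getValue());    // 42: the fixed form
          // statsMap.get(e).toString() would throw a NullPointerException
        }
      }
    }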


[42/55] [abbrv] hive git commit: HIVE-12223: Filter on Grouping__ID does not work properly (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jx...@apache.org.
HIVE-12223: Filter on Grouping__ID does not work properly (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0f716f16
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0f716f16
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0f716f16

Branch: refs/heads/master-fixed
Commit: 0f716f16fa454bb7b9032f3909398b68ec65c635
Parents: 0a90562
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Oct 22 09:06:30 2015 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Nov 5 09:35:00 2015 +0100

----------------------------------------------------------------------
 .../hive/ql/ppd/ExprWalkerProcFactory.java      | 12 ++++
 .../clientpositive/groupby_grouping_id3.q       | 22 +++++++
 .../clientpositive/groupby_grouping_id3.q.out   | 60 ++++++++++++++++++++
 3 files changed, 94 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0f716f16/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
index 9bd1847..a09dcef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
@@ -84,6 +85,17 @@ public final class ExprWalkerProcFactory {
         // replace the output expression with the input expression so that
         // parent op can understand this expression
         ExprNodeDesc exp = op.getColumnExprMap().get(colref.getColumn());
+        // if the operator is a groupby and we are referencing the grouping
+        // id column, we cannot push the predicate
+        if (op instanceof GroupByOperator) {
+          GroupByOperator groupBy = (GroupByOperator) op;
+          if (groupBy.getConf().isGroupingSetsPresent()) {
+            int groupingSetPlaceholderPos = groupBy.getConf().getKeys().size() - 1;
+            if (colref.getColumn().equals(groupBy.getSchema().getColumnNames().get(groupingSetPlaceholderPos))) {
+              exp = null;
+            }
+          }
+        }
         if (exp == null) {
           // means that expression can't be pushed either because it is value in
           // group by

http://git-wip-us.apache.org/repos/asf/hive/blob/0f716f16/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_id3.q b/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
new file mode 100644
index 0000000..c6746a8
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
@@ -0,0 +1,22 @@
+CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1;
+
+set hive.cbo.enable = false;
+
+-- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1;
+
+set hive.cbo.enable = true;
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/0f716f16/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
new file mode 100644
index 0000000..c305bfd
--- /dev/null
+++ b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
@@ -0,0 +1,60 @@
+PREHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	NULL	1	2
+2	NULL	1	1
+3	NULL	1	2
+4	NULL	1	1
+PREHOOK: query: SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*)
+FROM T1
+GROUP BY key, value
+GROUPING SETS ((), (key))
+HAVING GROUPING__ID = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	NULL	1	2
+2	NULL	1	1
+3	NULL	1	2
+4	NULL	1	1


[21/55] [abbrv] hive git commit: HIVE-12266 When client exits abnormally, it doesn't release ACID locks (Wei Zheng, via Eugene Koifman)

Posted by jx...@apache.org.
HIVE-12266 When client exits abnormally, it doesn't release ACID locks (Wei Zheng, via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/47617d31
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/47617d31
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/47617d31

Branch: refs/heads/master-fixed
Commit: 47617d31f347a0ba78ebfc903738b39dd960b19b
Parents: d7c0485
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Tue Nov 3 09:03:54 2015 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Tue Nov 3 09:03:54 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/Driver.java  | 43 +++++++++++++++-----
 1 file changed, 32 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/47617d31/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 18052f3..93c7a54 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -121,12 +121,14 @@ import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hive.common.util.ShutdownHookManager;
 
 public class Driver implements CommandProcessor {
 
   static final private String CLASS_NAME = Driver.class.getName();
   private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   static final private LogHelper console = new LogHelper(LOG);
+  static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
   private int maxRows = 100;
   ByteStream.Output bos = new ByteStream.Output();
@@ -390,7 +392,20 @@ public class Driver implements CommandProcessor {
 
     try {
       // Initialize the transaction manager.  This must be done before analyze is called.
-      SessionState.get().initTxnMgr(conf);
+      final HiveTxnManager txnManager = SessionState.get().initTxnMgr(conf);
+      // In case when user Ctrl-C twice to kill Hive CLI JVM, we want to release locks
+      ShutdownHookManager.addShutdownHook(
+          new Runnable() {
+            @Override
+            public void run() {
+              try {
+                releaseLocksAndCommitOrRollback(false, txnManager);
+              } catch (LockException e) {
+                LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " +
+                    e.getMessage());
+              }
+            }
+          }, SHUTDOWN_HOOK_PRIORITY);
 
       command = new VariableSubstitution(new HiveVariableSource() {
         @Override
@@ -537,7 +552,7 @@ public class Driver implements CommandProcessor {
    *
    * @param sem semantic analyzer for analyzed query
    * @param plan query plan
-   * @param astStringTree AST tree dump
+   * @param astTree AST tree dump
    * @throws java.io.IOException
    */
   private String getExplainOutput(BaseSemanticAnalyzer sem, QueryPlan plan,
@@ -1049,15 +1064,21 @@ public class Driver implements CommandProcessor {
   /**
    * @param commit if there is an open transaction and if true, commit,
    *               if false rollback.  If there is no open transaction this parameter is ignored.
+   * @param txnManager an optional existing transaction manager retrieved earlier from the session
    *
    **/
-  private void releaseLocksAndCommitOrRollback(boolean commit)
+  private void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager)
       throws LockException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
 
-    SessionState ss = SessionState.get();
-    HiveTxnManager txnMgr = ss.getTxnMgr();
+    HiveTxnManager txnMgr;
+    if (txnManager == null) {
+      SessionState ss = SessionState.get();
+      txnMgr = ss.getTxnMgr();
+    } else {
+      txnMgr = txnManager;
+    }
     // If we've opened a transaction we need to commit or rollback rather than explicitly
     // releasing the locks.
     if (txnMgr.isTxnOpen()) {
@@ -1206,7 +1227,7 @@ public class Driver implements CommandProcessor {
     }
     if (ret != 0) {
       try {
-        releaseLocksAndCommitOrRollback(false);
+        releaseLocksAndCommitOrRollback(false, null);
       } catch (LockException e) {
         LOG.warn("Exception in releasing locks. "
             + org.apache.hadoop.util.StringUtils.stringifyException(e));
@@ -1287,7 +1308,7 @@ public class Driver implements CommandProcessor {
         if(plan.getAutoCommitValue() && !txnManager.getAutoCommit()) {
           /*here, if there is an open txn, we want to commit it; this behavior matches
           * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)*/
-          releaseLocksAndCommitOrRollback(true);
+          releaseLocksAndCommitOrRollback(true, null);
           txnManager.setAutoCommit(true);
         }
         else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) {
@@ -1315,10 +1336,10 @@ public class Driver implements CommandProcessor {
     //if needRequireLock is false, the release here will do nothing because there is no lock
     try {
       if(txnManager.getAutoCommit() || plan.getOperation() == HiveOperation.COMMIT) {
-        releaseLocksAndCommitOrRollback(true);
+        releaseLocksAndCommitOrRollback(true, null);
       }
       else if(plan.getOperation() == HiveOperation.ROLLBACK) {
-        releaseLocksAndCommitOrRollback(false);
+        releaseLocksAndCommitOrRollback(false, null);
       }
       else {
         //txn (if there is one started) is not finished
@@ -1349,7 +1370,7 @@ public class Driver implements CommandProcessor {
   private CommandProcessorResponse rollback(CommandProcessorResponse cpr) {
     //console.printError(cpr.toString());
     try {
-      releaseLocksAndCommitOrRollback(false);
+      releaseLocksAndCommitOrRollback(false, null);
     }
     catch (LockException e) {
       LOG.error("rollback() FAILED: " + cpr);//make sure not to loose 
@@ -1897,7 +1918,7 @@ public class Driver implements CommandProcessor {
     destroyed = true;
     if (!hiveLocks.isEmpty()) {
       try {
-        releaseLocksAndCommitOrRollback(false);
+        releaseLocksAndCommitOrRollback(false, null);
       } catch (LockException e) {
         LOG.warn("Exception when releasing locking in destroy: " +
             e.getMessage());
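
The shutdown hook is what makes this work when the CLI JVM is killed with Ctrl-C: the JVM still runs registered hooks on SIGINT, so the rollback happens even though Driver never returns through its normal teardown. A plain-JDK sketch of the pattern (Hive routes this through its ShutdownHookManager for priority ordering between hooks; the cleanup body stands in for releaseLocksAndCommitOrRollback):

    public class ShutdownHookSketch {
      public static void main(String[] args) throws Exception {
        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
          @Override
          public void run() {
            // Roll back and release locks; log rather than throw, since an
            // exception escaping a shutdown hook cannot be handled usefully.
            System.err.println("Releasing locks before JVM exit");
          }
        }));
        Thread.sleep(60000); // Ctrl-C during this sleep still runs the hook
      }
    }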


[16/55] [abbrv] hive git commit: HIVE-11718 JDBC ResultSet.setFetchSize(0) returns no results (Aleksei Statkevich via Alan Gates)

Posted by jx...@apache.org.
HIVE-11718 JDBC ResultSet.setFetchSize(0) returns no results (Aleksei Statkevich via Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/902a548e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/902a548e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/902a548e

Branch: refs/heads/master-fixed
Commit: 902a548ea5f52481436c2ef99753d8cd34c666dc
Parents: de1fe68
Author: Alan Gates <ga...@hortonworks.com>
Authored: Mon Nov 2 16:14:32 2015 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Nov 2 16:14:32 2015 -0800

----------------------------------------------------------------------
 jdbc/pom.xml                                    |  8 +++++
 .../org/apache/hive/jdbc/HiveStatement.java     | 14 +++++++--
 .../org/apache/hive/jdbc/HiveStatementTest.java | 31 ++++++++++++++++++++
 3 files changed, 51 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/902a548e/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index dadf9c3..ea961a4 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -104,6 +104,13 @@
       <version>${hadoop.version}</version>
       <optional>true</optional>
     </dependency>
+    <!-- test inter-project -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>${junit.version}</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <profiles>
@@ -117,6 +124,7 @@
 
   <build>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
+    <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
     <resources>
       <resource>
         <directory>${basedir}/src/resources</directory>

http://git-wip-us.apache.org/repos/asf/hive/blob/902a548e/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
index 25456af..180f99e8 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
@@ -53,12 +53,13 @@ import org.slf4j.LoggerFactory;
  */
 public class HiveStatement implements java.sql.Statement {
   public static final Logger LOG = LoggerFactory.getLogger(HiveStatement.class.getName());
+  private static final int DEFAULT_FETCH_SIZE = 1000;
   private final HiveConnection connection;
   private TCLIService.Iface client;
   private TOperationHandle stmtHandle = null;
   private final TSessionHandle sessHandle;
   Map<String,String> sessConf = new HashMap<String,String>();
-  private int fetchSize = 1000;
+  private int fetchSize = DEFAULT_FETCH_SIZE;
   private boolean isScrollableResultset = false;
   /**
    * We need to keep a reference to the result set to support the following:
@@ -673,7 +674,16 @@ public class HiveStatement implements java.sql.Statement {
   @Override
   public void setFetchSize(int rows) throws SQLException {
     checkConnection("setFetchSize");
-    fetchSize = rows;
+    if (rows > 0) {
+      fetchSize = rows;
+    } else if (rows == 0) {
+      // Javadoc for Statement interface states that if the value is zero
+      // then "fetch size" hint is ignored.
+      // In this case it means reverting it to the default value.
+      fetchSize = DEFAULT_FETCH_SIZE;
+    } else {
+      throw new SQLException("Fetch size must be greater than or equal to 0");
+    }
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hive/blob/902a548e/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java
----------------------------------------------------------------------
diff --git a/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java b/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java
new file mode 100644
index 0000000..be23b10
--- /dev/null
+++ b/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java
@@ -0,0 +1,31 @@
+package org.apache.hive.jdbc;
+
+import org.junit.Test;
+
+import java.sql.SQLException;
+
+import static org.junit.Assert.assertEquals;
+
+public class HiveStatementTest {
+
+  @Test
+  public void testSetFetchSize1() throws SQLException {
+    HiveStatement stmt = new HiveStatement(null, null, null);
+    stmt.setFetchSize(123);
+    assertEquals(123, stmt.getFetchSize());
+  }
+
+  @Test
+  public void testSetFetchSize2() throws SQLException {
+    HiveStatement stmt = new HiveStatement(null, null, null);
+    int initial = stmt.getFetchSize();
+    stmt.setFetchSize(0);
+    assertEquals(initial, stmt.getFetchSize());
+  }
+
+  @Test(expected = SQLException.class)
+  public void testSetFetchSize3() throws SQLException {
+    HiveStatement stmt = new HiveStatement(null, null, null);
+    stmt.setFetchSize(-1);
+  }
+}


[33/55] [abbrv] hive git commit: HIVE-12063: Pad Decimal numbers with trailing zeros to the scale of the column (reviewed by Szehon)

Posted by jx...@apache.org.
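
The hunks below update expected test outputs only: decimal results are now printed padded with trailing zeros out to the declared scale of the result type. For example, key + key over a scale-10 decimal column renders 2 as 2.0000000000, and ROUND(key, 2) renders -4400 as -4400.00. A small java.math.BigDecimal sketch (illustration only; Hive's own HiveDecimal formatting path is not shown here) reproduces the padding rule:

// Illustration only: BigDecimal.setScale mimics the trailing-zero padding
// that the updated expected outputs follow.
import java.math.BigDecimal;

public class DecimalPadDemo {
  public static void main(String[] args) {
    BigDecimal two = new BigDecimal("1").add(new BigDecimal("1"));
    // Pad to the result scale of 10, as in the key + key outputs below:
    System.out.println(two.setScale(10));                     // 2.0000000000
    // ROUND(key, 2) results are padded to scale 2:
    System.out.println(new BigDecimal("-4400").setScale(2));  // -4400.00
  }
}
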
http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out
index ce1fe3f..abbfc50 100644
--- a/ql/src/test/results/clientpositive/decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/decimal_udf.q.out
@@ -55,44 +55,44 @@ POSTHOOK: query: SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-200
-20
-2
-0.2
-0.02
-400
-40
-4
-0
-0.4
-0.04
-0.6
-0.66
-0.666
--0.6
--0.66
--0.666
-2
-4
-6.28
--2.24
--2.24
--2.244
-2.24
-2.244
-248
-250.4
--2510.98
-6.28
-6.28
-6.28
-2
--2469135780.246913578
-2469135780.24691356
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.2000000000
+0.0200000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.4000000000
+0.0400000000
+0.6000000000
+0.6600000000
+0.6660000000
+-0.6000000000
+-0.6600000000
+-0.6660000000
+2.0000000000
+4.0000000000
+6.2800000000
+-2.2400000000
+-2.2400000000
+-2.2440000000
+2.2400000000
+2.2440000000
+248.0000000000
+250.4000000000
+-2510.9800000000
+6.2800000000
+6.2800000000
+6.2800000000
+2.0000000000
+-2469135780.2469135780
+2469135780.2469135600
 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
@@ -122,44 +122,44 @@ POSTHOOK: query: SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-200
-20
-2
-0.1
-0.01
-400
-40
-4
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-2
-4
-6.14
--2.12
--2.12
--12.122
-2.12
-2.122
-248
-250.2
--2510.49
-6.14
-6.14
-7.14
-2
--2469135780.123456789
-2469135780.12345678
+0.0000000000
+0.0000000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.1000000000
+0.0100000000
+400.0000000000
+40.0000000000
+4.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+2.0000000000
+4.0000000000
+6.1400000000
+-2.1200000000
+-2.1200000000
+-12.1220000000
+2.1200000000
+2.1220000000
+248.0000000000
+250.2000000000
+-2510.4900000000
+6.1400000000
+6.1400000000
+7.1400000000
+2.0000000000
+-2469135780.1234567890
+2469135780.1234567800
 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
@@ -325,44 +325,44 @@ POSTHOOK: query: SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-0
+0.0000000000
 NULL
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
@@ -392,44 +392,44 @@ POSTHOOK: query: SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--8800
+-8800.0000000000
 NULL
-0
-0
-0
-0
-0
-0.1
-0.01
-0
-0
-0
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-0
-0
-0.14
--0.12
--0.12
-9.878
-0.12
-0.122
-0
-0.2
--0.49
-0.14
-0.14
--0.86
-0
--0.123456789
-0.12345678
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.1000000000
+0.0100000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.0000000000
+0.0000000000
+0.1400000000
+-0.1200000000
+-0.1200000000
+9.8780000000
+0.1200000000
+0.1220000000
+0.0000000000
+0.2000000000
+-0.4900000000
+0.1400000000
+0.1400000000
+-0.8600000000
+0.0000000000
+-0.1234567890
+0.1234567800
 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
@@ -595,42 +595,42 @@ POSTHOOK: query: SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-19360000
+19360000.00000000000000000000
 NULL
-0
-0
-10000
-100
-1
-0.01
-0.0001
-40000
-400
-4
-0
-0.04
-0.0004
-0.09
-0.1089
-0.110889
-0.09
-0.1089
-0.110889
-1
-4
-9.8596
-1.2544
-1.2544
-1.258884
-1.2544
-1.258884
-15376
-15675.04
-1576255.1401
-9.8596
-9.8596
-9.8596
-1
+0.00000000000000000000
+0.00000000000000000000
+10000.00000000000000000000
+100.00000000000000000000
+1.00000000000000000000
+0.01000000000000000000
+0.00010000000000000000
+40000.00000000000000000000
+400.00000000000000000000
+4.00000000000000000000
+0.00000000000000000000
+0.04000000000000000000
+0.00040000000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+0.09000000000000000000
+0.10890000000000000000
+0.11088900000000000000
+1.00000000000000000000
+4.00000000000000000000
+9.85960000000000000000
+1.25440000000000000000
+1.25440000000000000000
+1.25888400000000000000
+1.25440000000000000000
+1.25888400000000000000
+15376.00000000000000000000
+15675.04000000000000000000
+1576255.14010000000000000000
+9.85960000000000000000
+9.85960000000000000000
+9.85960000000000000000
+1.00000000000000000000
 NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
@@ -665,29 +665,29 @@ POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-100	100
-10	10
-1	1
-200	200
-20	20
-2	2
-1	1
-2	2
-3.14	3
--1.12	-1
--1.12	-1
--1.122	-11
-1.12	1
-1.122	1
-124	124
-125.2	125
--1255.49	-1255
-3.14	3
-3.14	3
-3.14	4
-1	1
--1234567890.123456789	-1234567890
-1234567890.12345678	1234567890
+100.0000000000	100
+10.0000000000	10
+1.0000000000	1
+200.0000000000	200
+20.0000000000	20
+2.0000000000	2
+1.0000000000	1
+2.0000000000	2
+3.1400000000	3
+-1.1200000000	-1
+-1.1200000000	-1
+-1.1220000000	-11
+1.1200000000	1
+1.1220000000	1
+124.0000000000	124
+125.2000000000	125
+-1255.4900000000	-1255
+3.1400000000	3
+3.1400000000	3
+3.1400000000	4
+1.0000000000	1
+-1234567890.1234567890	-1234567890
+1234567890.1234567800	1234567890
 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
@@ -717,44 +717,44 @@ POSTHOOK: query: SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--19360000
+-19360000.0000000000
 NULL
-0
-0
-10000
-100
-1
-0
-0
-40000
-400
-4
-0
-0
-0
-0
-0
-0
-0
-0
-0
-1
-4
-9.42
-1.12
-1.12
-12.342
-1.12
-1.122
-15376
-15650
-1575639.95
-9.42
-9.42
-12.56
-1
-1524157875171467887.50190521
-1524157875171467876.3907942
+0.0000000000
+0.0000000000
+10000.0000000000
+100.0000000000
+1.0000000000
+0.0000000000
+0.0000000000
+40000.0000000000
+400.0000000000
+4.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+0.0000000000
+1.0000000000
+4.0000000000
+9.4200000000
+1.1200000000
+1.1200000000
+12.3420000000
+1.1200000000
+1.1220000000
+15376.0000000000
+15650.0000000000
+1575639.9500000000
+9.4200000000
+9.4200000000
+12.5600000000
+1.0000000000
+1524157875171467887.5019052100
+1524157875171467876.3907942000
 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
@@ -989,40 +989,40 @@ POSTHOOK: query: SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
+1.000000000000000000000000
 PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
@@ -1055,30 +1055,30 @@ POSTHOOK: query: SELECT key / value FROM DECIMAL_UDF WHERE value is not null and
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1
-1
-1
-1
-1
-1
-1
-1
-1
+-1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
+1.000000000000000000000
 1.046666666666666666667
-1.12
-1.12
-0.102
-1.12
-1.122
-1
-1.0016
+1.120000000000000000000
+1.120000000000000000000
+0.102000000000000000000
+1.120000000000000000000
+1.122000000000000000000
+1.000000000000000000000
+1.001600000000000000000
 1.000390438247011952191
 1.046666666666666666667
 1.046666666666666666667
-0.785
-1
-1.0000000001
-1.00000000009999999271
+0.785000000000000000000
+1.000000000000000000000
+1.000000000100000000000
+1.000000000099999992710
 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
@@ -1233,44 +1233,44 @@ POSTHOOK: query: SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
-0.3
-0.33
-0.333
-1
-2
-3.14
-1.12
-1.12
-1.122
-1.12
-1.122
-124
-125.2
-1255.49
-3.14
-3.14
-3.14
-1
-1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- avg
 EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
@@ -1359,23 +1359,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890	-1234567890.123456789	-1234567890.123456789	-1234567890.123456789
--1255	-1255.49	-1255.49	-1255.49
--11	-1.122	-1.122	-1.122
--1	-1.12	-1.12	-2.24
-0	0.02538461538461538461538	0.02538461538462	0.33
-1	1.0484	1.0484	5.242
-2	2	2	4
-3	3.14	3.14	9.42
-4	3.14	3.14	3.14
-10	10	10	10
-20	20	20	20
-100	100	100	100
-124	124	124	124
-125	125.2	125.2	125.2
-200	200	200	200
-4400	-4400	-4400	-4400
-1234567890	1234567890.12345678	1234567890.12345678	1234567890.12345678
+-1234567890	-1234567890.12345678900000000000000	-1234567890.12345678900000	-1234567890.1234567890
+-1255	-1255.49000000000000000000000	-1255.49000000000000	-1255.4900000000
+-11	-1.12200000000000000000000	-1.12200000000000	-1.1220000000
+-1	-1.12000000000000000000000	-1.12000000000000	-2.2400000000
+0	0.02538461538461538461538	0.02538461538462	0.3300000000
+1	1.04840000000000000000000	1.04840000000000	5.2420000000
+2	2.00000000000000000000000	2.00000000000000	4.0000000000
+3	3.14000000000000000000000	3.14000000000000	9.4200000000
+4	3.14000000000000000000000	3.14000000000000	3.1400000000
+10	10.00000000000000000000000	10.00000000000000	10.0000000000
+20	20.00000000000000000000000	20.00000000000000	20.0000000000
+100	100.00000000000000000000000	100.00000000000000	100.0000000000
+124	124.00000000000000000000000	124.00000000000000	124.0000000000
+125	125.20000000000000000000000	125.20000000000000	125.2000000000
+200	200.00000000000000000000000	200.00000000000000	200.0000000000
+4400	-4400.00000000000000000000000	-4400.00000000000000	-4400.0000000000
+1234567890	1234567890.12345678000000000000000	1234567890.12345678000000	1234567890.1234567800
 PREHOOK: query: -- negative
 EXPLAIN SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1407,44 +1407,44 @@ POSTHOOK: query: SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-4400
+4400.0000000000
 NULL
-0
-0
--100
--10
--1
--0.1
--0.01
--200
--20
--2
-0
--0.2
--0.02
--0.3
--0.33
--0.333
-0.3
-0.33
-0.333
--1
--2
--3.14
-1.12
-1.12
-1.122
--1.12
--1.122
--124
--125.2
-1255.49
--3.14
--3.14
--3.14
--1
-1234567890.123456789
--1234567890.12345678
+0.0000000000
+0.0000000000
+-100.0000000000
+-10.0000000000
+-1.0000000000
+-0.1000000000
+-0.0100000000
+-200.0000000000
+-20.0000000000
+-2.0000000000
+0.0000000000
+-0.2000000000
+-0.0200000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+0.3000000000
+0.3300000000
+0.3330000000
+-1.0000000000
+-2.0000000000
+-3.1400000000
+1.1200000000
+1.1200000000
+1.1220000000
+-1.1200000000
+-1.1220000000
+-124.0000000000
+-125.2000000000
+1255.4900000000
+-3.1400000000
+-3.1400000000
+-3.1400000000
+-1.0000000000
+1234567890.1234567890
+-1234567890.1234567800
 PREHOOK: query: -- positive
 EXPLAIN SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1476,44 +1476,44 @@ POSTHOOK: query: SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.0000000000
 NULL
-0
-0
-100
-10
-1
-0.1
-0.01
-200
-20
-2
-0
-0.2
-0.02
-0.3
-0.33
-0.333
--0.3
--0.33
--0.333
-1
-2
-3.14
--1.12
--1.12
--1.122
-1.12
-1.122
-124
-125.2
--1255.49
-3.14
-3.14
-3.14
-1
--1234567890.123456789
-1234567890.12345678
+0.0000000000
+0.0000000000
+100.0000000000
+10.0000000000
+1.0000000000
+0.1000000000
+0.0100000000
+200.0000000000
+20.0000000000
+2.0000000000
+0.0000000000
+0.2000000000
+0.0200000000
+0.3000000000
+0.3300000000
+0.3330000000
+-0.3000000000
+-0.3300000000
+-0.3330000000
+1.0000000000
+2.0000000000
+3.1400000000
+-1.1200000000
+-1.1200000000
+-1.1220000000
+1.1200000000
+1.1220000000
+124.0000000000
+125.2000000000
+-1255.4900000000
+3.1400000000
+3.1400000000
+3.1400000000
+1.0000000000
+-1234567890.1234567890
+1234567890.1234567800
 PREHOOK: query: -- ceiling
 EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -1683,42 +1683,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--4400
+-4400.00
 NULL
-0
-0
-100
-10
-1
-0.1
+0.00
+0.00
+100.00
+10.00
+1.00
+0.10
 0.01
-200
-20
-2
-0
-0.2
+200.00
+20.00
+2.00
+0.00
+0.20
 0.02
-0.3
+0.30
 0.33
 0.33
--0.3
+-0.30
 -0.33
 -0.33
-1
-2
+1.00
+2.00
 3.14
 -1.12
 -1.12
 -1.12
 1.12
 1.12
-124
-125.2
+124.00
+125.20
 -1255.49
 3.14
 3.14
 3.14
-1
+1.00
 -1234567890.12
 1234567890.12
 PREHOOK: query: -- power
@@ -1821,44 +1821,44 @@ POSTHOOK: query: SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--2199
+-2199.000000000000
 NULL
 NULL
 NULL
-1
-1
-0
-0
-0
-1
-1
-0
+1.000000000000
+1.000000000000
+0.000000000000
+0.000000000000
+0.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
 NULL
-0
-0
-0.1
-0.01
-0.001
-0.1
-0.01
-0.001
-0
-0
-1
--0.12
--0.12
--0.122
-0.44
-0.439
-1
-1
--626.745
-1
-1
-1
-0
--617283944.0617283945
-1
+0.000000000000
+0.000000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.100000000000
+0.010000000000
+0.001000000000
+0.000000000000
+0.000000000000
+1.000000000000
+-0.120000000000
+-0.120000000000
+-0.122000000000
+0.440000000000
+0.439000000000
+1.000000000000
+1.000000000000
+-626.745000000000
+1.000000000000
+1.000000000000
+1.000000000000
+0.000000000000
+-617283944.061728394500
+1.000000000000
 PREHOOK: query: -- stddev, var
 EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value
 PREHOOK: type: QUERY
@@ -2134,7 +2134,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
--1234567890.123456789
+-1234567890.1234567890
 PREHOOK: query: -- max
 EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
@@ -2193,7 +2193,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
-1234567890.12345678
+1234567890.1234567800
 PREHOOK: query: -- count
 EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
index 1f613c4..f7a9853 100644
--- a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
+++ b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
@@ -31,11 +31,11 @@ POSTHOOK: query: select * from sample_06 where gpa = 3.00
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@sample_06
 #### A masked pattern was here ####
-aaa	35	3
-bbb	32	3
-ccc	32	3
-ddd	35	3
-eee	32	3
+aaa	35	3.00
+bbb	32	3.00
+ccc	32	3.00
+ddd	35	3.00
+eee	32	3.00
 PREHOOK: query: create table tab1 (name varchar(50), age int, gpa decimal(3, 2))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -63,8 +63,8 @@ POSTHOOK: query: select * from tab1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab1
 #### A masked pattern was here ####
-aaa	35	3
-bbb	32	3
-ccc	32	3
-ddd	35	3
-eee	32	3
+aaa	35	3.00
+bbb	32	3.00
+ccc	32	3.00
+ddd	35	3.00
+eee	32	3.00

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
index 1586f8a..36a032a 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
@@ -1320,105 +1320,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1508,105 +1508,105 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_mapjoin
 #### A masked pattern was here ####
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	-617.5607769230769
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	6984454.211097692
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
-6981	6981	5831542.269248378	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	-617.56077692307690
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	6984454.21109769200000
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
+6981	6981	5831542.2692483780	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	-617.5607769230769
-6981	6981	NULL	-617.5607769230769
+6981	6981	NULL	-617.56077692307690
+6981	6981	NULL	-617.56077692307690
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	NULL	6984454.211097692
+6981	6981	NULL	6984454.21109769200000
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
 6981	6981	NULL	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	-617.5607769230769
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	6984454.211097692
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
-6981	6981	-515.621072973	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	-617.56077692307690
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	6984454.21109769200000
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
+6981	6981	-515.6210729730	NULL
 PREHOOK: query: DROP TABLE decimal_mapjoin
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@decimal_mapjoin

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
index 98d9ceb..4c8b295 100644
--- a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
@@ -169,112 +169,112 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -285,109 +285,109 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-14	14
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-17	17
-45	45
-45	45
-45	45
-45	45
-45	45
-6	6
-6	6
-6	6
-6	6
-6	6
-6	6
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-62	62
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-64	64
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-70	70
-79	79
-79	79
-79	79
-79	79
-79	79
-79	79
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-89	89
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
-9	9
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+14.00	14
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+17.00	17
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+45.00	45
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+6.00	6
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+62.00	62
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+64.00	64
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+70.00	70
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+79.00	79
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+89.00	89
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9
+9.00	9

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/orc_file_dump.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_file_dump.q.out b/ql/src/test/results/clientpositive/orc_file_dump.q.out
index c494d47..50d5701 100644
--- a/ql/src/test/results/clientpositive/orc_file_dump.q.out
+++ b/ql/src/test/results/clientpositive/orc_file_dump.q.out
@@ -196,7 +196,7 @@ File length: 33458 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
 -- END ORC FILE DUMP --
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: alter table orc_ppd set tblproperties("orc.bloom.filter.fpp"="0.01")
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@orc_ppd
@@ -314,7 +314,7 @@ File length: 38613 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
 -- END ORC FILE DUMP --
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: CREATE TABLE orc_ppd_part(t tinyint,
            si smallint,
            i int,
@@ -444,4 +444,4 @@ File length: 33458 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
 -- END ORC FILE DUMP --
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty	2015	10
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty	2015	10

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
index 0d4cd15..6a528dd 100644
--- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
@@ -251,7 +251,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_pred
@@ -260,7 +260,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_pred
 #### A masked pattern was here ####
-124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
 PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
   WHERE t IS NOT NULL
   AND t < 0

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_decimal.q.out b/ql/src/test/results/clientpositive/parquet_decimal.q.out
index 493bd4f..a19cd6d 100644
--- a/ql/src/test/results/clientpositive/parquet_decimal.q.out
+++ b/ql/src/test/results/clientpositive/parquet_decimal.q.out
@@ -63,9 +63,9 @@ Mary	4.33
 Cluck	5.96
 Tom	-12.25
 Mary	33.33
-Tom	19
-Beck	0
-Beck	79.9
+Tom	19.00
+Beck	0.00
+Beck	79.90
 PREHOOK: query: SELECT value, count(*) FROM parq_dec GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parq_dec
@@ -75,14 +75,14 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parq_dec
 #### A masked pattern was here ####
 -12.25	1
-0	1
+0.00	1
 4.33	1
 5.96	1
-19	1
+19.00	1
 33.33	1
 55.71	1
 77.34	1
-79.9	1
+79.90	1
 234.79	1
 PREHOOK: query: TRUNCATE TABLE parq_dec
 PREHOOK: type: TRUNCATETABLE
@@ -158,12 +158,12 @@ POSTHOOK: Input: default@parq_dec1
 77.3
 55.7
 4.3
-6
+6.0
 12.3
 33.3
 0.2
 3.2
-8
+8.0
 PREHOOK: query: DROP TABLE dec
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@dec

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out b/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
index 1355849..6e62ee4 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
@@ -26,11 +26,11 @@ POSTHOOK: query: select * from newtypestbl where b=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b!=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -39,11 +39,11 @@ POSTHOOK: query: select * from newtypestbl where b!=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b<true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -52,11 +52,11 @@ POSTHOOK: query: select * from newtypestbl where b<true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b>true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -73,16 +73,16 @@ POSTHOOK: query: select * from newtypestbl where b<=true sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -91,11 +91,11 @@ POSTHOOK: query: select * from newtypestbl where b=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b!=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -104,11 +104,11 @@ POSTHOOK: query: select * from newtypestbl where b!=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -125,11 +125,11 @@ POSTHOOK: query: select * from newtypestbl where b>false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -138,11 +138,11 @@ POSTHOOK: query: select * from newtypestbl where b<=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -151,11 +151,11 @@ POSTHOOK: query: select * from newtypestbl where b=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b!=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -164,11 +164,11 @@ POSTHOOK: query: select * from newtypestbl where b!=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b<true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -177,11 +177,11 @@ POSTHOOK: query: select * from newtypestbl where b<true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b>true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -198,16 +198,16 @@ POSTHOOK: query: select * from newtypestbl where b<=true sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -216,11 +216,11 @@ POSTHOOK: query: select * from newtypestbl where b=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
 PREHOOK: query: select * from newtypestbl where b!=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -229,11 +229,11 @@ POSTHOOK: query: select * from newtypestbl where b!=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -250,11 +250,11 @@ POSTHOOK: query: select * from newtypestbl where b>false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
-apple     	bee	0.22	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
+apple     	bee	0.220	true
 PREHOOK: query: select * from newtypestbl where b<=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -263,8 +263,8 @@ POSTHOOK: query: select * from newtypestbl where b<=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
-hello     	world	11.22	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false
+hello     	world	11.220	false

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
index f224870..defaa9d 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where c="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where c="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c!="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where c!="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c!="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where c!="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c<"hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where c<"hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c<"hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where c<"hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c<="hello" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,16 +106,16 @@ POSTHOOK: query: select * from newtypestbl where c<="hello" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c<="hello" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -124,16 +124,16 @@ POSTHOOK: query: select * from newtypestbl where c<="hello" sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c="apple "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,11 +171,11 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -184,16 +184,16 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -202,16 +202,16 @@ POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c in ("carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,11 +249,11 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -262,16 +262,16 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -280,16 +280,16 @@ POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl

http://git-wip-us.apache.org/repos/asf/hive/blob/3228ba7c/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
index e599014..55231e9 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
@@ -28,11 +28,11 @@ select * from newtypestbl where da='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -41,11 +41,11 @@ POSTHOOK: query: select * from newtypestbl where da='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da= date '1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -54,11 +54,11 @@ POSTHOOK: query: select * from newtypestbl where da= date '1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -67,11 +67,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -80,11 +80,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -93,11 +93,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -106,11 +106,11 @@ POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -119,11 +119,11 @@ POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -132,11 +132,11 @@ POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -145,11 +145,11 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -158,11 +158,11 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -171,16 +171,16 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -189,16 +189,16 @@ POSTHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<'1970-02-15'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -223,11 +223,11 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -236,11 +236,11 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -249,16 +249,16 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -267,16 +267,16 @@ POSTHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -285,11 +285,11 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -298,11 +298,11 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -311,16 +311,16 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -329,16 +329,16 @@ POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as dat
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -363,11 +363,11 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -376,11 +376,11 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
 PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -389,16 +389,16 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
@@ -407,16 +407,16 @@ POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '19
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-apple     	bee	0.22	1970-02-20
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
-hello     	world	11.22	1970-02-27
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+apple     	bee	0.220	1970-02-20
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
+hello     	world	11.220	1970-02-27
 PREHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl