Posted to commits@hive.apache.org by sa...@apache.org on 2019/01/23 10:55:09 UTC

[1/2] hive git commit: HIVE-21078: Replicate column and table level statistics for unpartitioned Hive tables (Ashutosh Bapat, reviewed by Sankar Hariappan)

Repository: hive
Updated Branches:
  refs/heads/master eba9646b4 -> 2ffca04a8


http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 8c2988c..75edb63 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -59,6 +59,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)19);
   private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)20);
   private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)21);
+  private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.STRUCT, (short)22);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -87,6 +88,7 @@ import org.slf4j.LoggerFactory;
   private PrincipalType ownerType; // optional
   private long writeId; // optional
   private boolean isStatsCompliant; // optional
+  private ColumnStatistics colStats; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -114,7 +116,8 @@ import org.slf4j.LoggerFactory;
      */
     OWNER_TYPE((short)19, "ownerType"),
     WRITE_ID((short)20, "writeId"),
-    IS_STATS_COMPLIANT((short)21, "isStatsCompliant");
+    IS_STATS_COMPLIANT((short)21, "isStatsCompliant"),
+    COL_STATS((short)22, "colStats");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -171,6 +174,8 @@ import org.slf4j.LoggerFactory;
           return WRITE_ID;
         case 21: // IS_STATS_COMPLIANT
           return IS_STATS_COMPLIANT;
+        case 22: // COL_STATS
+          return COL_STATS;
         default:
           return null;
       }
@@ -220,7 +225,7 @@ import org.slf4j.LoggerFactory;
   private static final int __WRITEID_ISSET_ID = 6;
   private static final int __ISSTATSCOMPLIANT_ISSET_ID = 7;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.ID,_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT};
+  private static final _Fields optionals[] = {_Fields.ID,_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT,_Fields.COL_STATS};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -269,6 +274,8 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.COL_STATS, new org.apache.thrift.meta_data.FieldMetaData("colStats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT        , "ColumnStatistics")));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
   }
@@ -371,6 +378,9 @@ import org.slf4j.LoggerFactory;
     }
     this.writeId = other.writeId;
     this.isStatsCompliant = other.isStatsCompliant;
+    if (other.isSetColStats()) {
+      this.colStats = other.colStats;
+    }
   }
 
   public Table deepCopy() {
@@ -409,6 +419,7 @@ import org.slf4j.LoggerFactory;
 
     setIsStatsCompliantIsSet(false);
     this.isStatsCompliant = false;
+    this.colStats = null;
   }
 
   public long getId() {
@@ -920,6 +931,29 @@ import org.slf4j.LoggerFactory;
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
   }
 
+  public ColumnStatistics getColStats() {
+    return this.colStats;
+  }
+
+  public void setColStats(ColumnStatistics colStats) {
+    this.colStats = colStats;
+  }
+
+  public void unsetColStats() {
+    this.colStats = null;
+  }
+
+  /** Returns true if field colStats is set (has been assigned a value) and false otherwise */
+  public boolean isSetColStats() {
+    return this.colStats != null;
+  }
+
+  public void setColStatsIsSet(boolean value) {
+    if (!value) {
+      this.colStats = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case ID:
@@ -1090,6 +1124,14 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case COL_STATS:
+      if (value == null) {
+        unsetColStats();
+      } else {
+        setColStats((ColumnStatistics)value);
+      }
+      break;
+
     }
   }
 
@@ -1158,6 +1200,9 @@ import org.slf4j.LoggerFactory;
     case IS_STATS_COMPLIANT:
       return isIsStatsCompliant();
 
+    case COL_STATS:
+      return getColStats();
+
     }
     throw new IllegalStateException();
   }
@@ -1211,6 +1256,8 @@ import org.slf4j.LoggerFactory;
       return isSetWriteId();
     case IS_STATS_COMPLIANT:
       return isSetIsStatsCompliant();
+    case COL_STATS:
+      return isSetColStats();
     }
     throw new IllegalStateException();
   }
@@ -1417,6 +1464,15 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_colStats = true && this.isSetColStats();
+    boolean that_present_colStats = true && that.isSetColStats();
+    if (this_present_colStats || that_present_colStats) {
+      if (!(this_present_colStats && that_present_colStats))
+        return false;
+      if (!this.colStats.equals(that.colStats))
+        return false;
+    }
+
     return true;
   }
 
@@ -1529,6 +1585,11 @@ import org.slf4j.LoggerFactory;
     if (present_isStatsCompliant)
       list.add(isStatsCompliant);
 
+    boolean present_colStats = true && (isSetColStats());
+    list.add(present_colStats);
+    if (present_colStats)
+      list.add(colStats);
+
     return list.hashCode();
   }
 
@@ -1750,6 +1811,16 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetColStats()).compareTo(other.isSetColStats());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColStats()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colStats, other.colStats);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -1923,6 +1994,16 @@ import org.slf4j.LoggerFactory;
       sb.append(this.isStatsCompliant);
       first = false;
     }
+    if (isSetColStats()) {
+      if (!first) sb.append(", ");
+      sb.append("colStats:");
+      if (this.colStats == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.colStats);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -2168,6 +2249,15 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 22: // COL_STATS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.colStats = new ColumnStatistics();
+              struct.colStats.read(iprot);
+              struct.setColStatsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -2303,6 +2393,13 @@ import org.slf4j.LoggerFactory;
         oprot.writeBool(struct.isStatsCompliant);
         oprot.writeFieldEnd();
       }
+      if (struct.colStats != null) {
+        if (struct.isSetColStats()) {
+          oprot.writeFieldBegin(COL_STATS_FIELD_DESC);
+          struct.colStats.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -2384,7 +2481,10 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetIsStatsCompliant()) {
         optionals.set(20);
       }
-      oprot.writeBitSet(optionals, 21);
+      if (struct.isSetColStats()) {
+        optionals.set(21);
+      }
+      oprot.writeBitSet(optionals, 22);
       if (struct.isSetId()) {
         oprot.writeI64(struct.id);
       }
@@ -2461,12 +2561,15 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetIsStatsCompliant()) {
         oprot.writeBool(struct.isStatsCompliant);
       }
+      if (struct.isSetColStats()) {
+        struct.colStats.write(oprot);
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(21);
+      BitSet incoming = iprot.readBitSet(22);
       if (incoming.get(0)) {
         struct.id = iprot.readI64();
         struct.setIdIsSet(true);
@@ -2575,6 +2678,11 @@ import org.slf4j.LoggerFactory;
         struct.isStatsCompliant = iprot.readBool();
         struct.setIsStatsCompliantIsSet(true);
       }
+      if (incoming.get(21)) {
+        struct.colStats = new ColumnStatistics();
+        struct.colStats.read(iprot);
+        struct.setColStatsIsSet(true);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 759081c..1ae447d 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -6842,6 +6842,10 @@ class Table {
    * @var bool
    */
   public $isStatsCompliant = null;
+  /**
+   * @var \metastore\ColumnStatistics
+   */
+  public $colStats = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -6946,6 +6950,11 @@ class Table {
           'var' => 'isStatsCompliant',
           'type' => TType::BOOL,
           ),
+        22 => array(
+          'var' => 'colStats',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\ColumnStatistics',
+          ),
         );
     }
     if (is_array($vals)) {
@@ -7012,6 +7021,9 @@ class Table {
       if (isset($vals['isStatsCompliant'])) {
         $this->isStatsCompliant = $vals['isStatsCompliant'];
       }
+      if (isset($vals['colStats'])) {
+        $this->colStats = $vals['colStats'];
+      }
     }
   }
 
@@ -7208,6 +7220,14 @@ class Table {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 22:
+          if ($ftype == TType::STRUCT) {
+            $this->colStats = new \metastore\ColumnStatistics();
+            $xfer += $this->colStats->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -7360,6 +7380,14 @@ class Table {
       $xfer += $output->writeBool($this->isStatsCompliant);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->colStats !== null) {
+      if (!is_object($this->colStats)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('colStats', TType::STRUCT, 22);
+      $xfer += $this->colStats->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -25928,6 +25956,10 @@ class GetTableRequest {
    * @var string
    */
   public $validWriteIdList = null;
+  /**
+   * @var bool
+   */
+  public $getColumnStats = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -25953,6 +25985,10 @@ class GetTableRequest {
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
+        7 => array(
+          'var' => 'getColumnStats',
+          'type' => TType::BOOL,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -25971,6 +26007,9 @@ class GetTableRequest {
       if (isset($vals['validWriteIdList'])) {
         $this->validWriteIdList = $vals['validWriteIdList'];
       }
+      if (isset($vals['getColumnStats'])) {
+        $this->getColumnStats = $vals['getColumnStats'];
+      }
     }
   }
 
@@ -26029,6 +26068,13 @@ class GetTableRequest {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 7:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->getColumnStats);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -26070,6 +26116,11 @@ class GetTableRequest {
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->getColumnStats !== null) {
+      $xfer += $output->writeFieldBegin('getColumnStats', TType::BOOL, 7);
+      $xfer += $output->writeBool($this->getColumnStats);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index b237eca..06938b4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -4791,6 +4791,7 @@ class Table:
    - ownerType
    - writeId
    - isStatsCompliant
+   - colStats
   """
 
   thrift_spec = (
@@ -4816,9 +4817,10 @@ class Table:
     (19, TType.I32, 'ownerType', None,     1, ), # 19
     (20, TType.I64, 'writeId', None, -1, ), # 20
     (21, TType.BOOL, 'isStatsCompliant', None, None, ), # 21
+    (22, TType.STRUCT, 'colStats', (ColumnStatistics, ColumnStatistics.thrift_spec), None, ), # 22
   )
 
-  def __init__(self, id=None, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[15][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[19][4], writeId=thrift_spec[20][4], isStatsCompliant=None,):
+  def __init__(self, id=None, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[15][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[19][4], writeId=thrift_spec[20][4], isStatsCompliant=None, colStats=None,):
     self.id = id
     self.tableName = tableName
     self.dbName = dbName
@@ -4840,6 +4842,7 @@ class Table:
     self.ownerType = ownerType
     self.writeId = writeId
     self.isStatsCompliant = isStatsCompliant
+    self.colStats = colStats
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -4970,6 +4973,12 @@ class Table:
           self.isStatsCompliant = iprot.readBool()
         else:
           iprot.skip(ftype)
+      elif fid == 22:
+        if ftype == TType.STRUCT:
+          self.colStats = ColumnStatistics()
+          self.colStats.read(iprot)
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -5071,6 +5080,10 @@ class Table:
       oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 21)
       oprot.writeBool(self.isStatsCompliant)
       oprot.writeFieldEnd()
+    if self.colStats is not None:
+      oprot.writeFieldBegin('colStats', TType.STRUCT, 22)
+      self.colStats.write(oprot)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -5101,6 +5114,7 @@ class Table:
     value = (value * 31) ^ hash(self.ownerType)
     value = (value * 31) ^ hash(self.writeId)
     value = (value * 31) ^ hash(self.isStatsCompliant)
+    value = (value * 31) ^ hash(self.colStats)
     return value
 
   def __repr__(self):
@@ -18158,6 +18172,7 @@ class GetTableRequest:
    - capabilities
    - catName
    - validWriteIdList
+   - getColumnStats
   """
 
   thrift_spec = (
@@ -18168,14 +18183,16 @@ class GetTableRequest:
     (4, TType.STRING, 'catName', None, None, ), # 4
     None, # 5
     (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
+    (7, TType.BOOL, 'getColumnStats', None, None, ), # 7
   )
 
-  def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None,):
+  def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None, getColumnStats=None,):
     self.dbName = dbName
     self.tblName = tblName
     self.capabilities = capabilities
     self.catName = catName
     self.validWriteIdList = validWriteIdList
+    self.getColumnStats = getColumnStats
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -18212,6 +18229,11 @@ class GetTableRequest:
           self.validWriteIdList = iprot.readString()
         else:
           iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.BOOL:
+          self.getColumnStats = iprot.readBool()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -18242,6 +18264,10 @@ class GetTableRequest:
       oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
+    if self.getColumnStats is not None:
+      oprot.writeFieldBegin('getColumnStats', TType.BOOL, 7)
+      oprot.writeBool(self.getColumnStats)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -18260,6 +18286,7 @@ class GetTableRequest:
     value = (value * 31) ^ hash(self.capabilities)
     value = (value * 31) ^ hash(self.catName)
     value = (value * 31) ^ hash(self.validWriteIdList)
+    value = (value * 31) ^ hash(self.getColumnStats)
     return value
 
   def __repr__(self):

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 40ac5a5..e885194 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -1127,6 +1127,7 @@ class Table
   OWNERTYPE = 19
   WRITEID = 20
   ISSTATSCOMPLIANT = 21
+  COLSTATS = 22
 
   FIELDS = {
     ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true},
@@ -1149,7 +1150,8 @@ class Table
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
     OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default =>     1, :optional => true, :enum_class => ::PrincipalType},
     WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
-    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true},
+    COLSTATS => {:type => ::Thrift::Types::STRUCT, :name => 'colStats', :class => ::ColumnStatistics, :optional => true}
   }
 
   def struct_fields; FIELDS; end
@@ -4057,13 +4059,15 @@ class GetTableRequest
   CAPABILITIES = 3
   CATNAME = 4
   VALIDWRITEIDLIST = 6
+  GETCOLUMNSTATS = 7
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true},
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
-    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+    GETCOLUMNSTATS => {:type => ::Thrift::Types::BOOL, :name => 'getColumnStats', :optional => true}
   }
 
   def struct_fields; FIELDS; end

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 35be3c4..eed1428 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.common;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.ArrayList;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -260,6 +261,7 @@ public class StatsSetupConst {
         stats.columnStats.put(colName, true);
       }
     }
+
     try {
       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
     } catch (JsonProcessingException e) {
@@ -267,6 +269,33 @@ public class StatsSetupConst {
     }
   }
 
+  /**
+   * @param params table/partition parameters
+   * @return the list of column names for which the stats are available.
+   */
+  public static List<String> getColumnsHavingStats(Map<String, String> params) {
+    if (params == null) {
+      // No table/partition params, no statistics available
+      return null;
+    }
+
+    ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+
+    // No stats available.
+    if (stats == null) {
+      return null;
+    }
+
+    List<String> colNames = new ArrayList<String>();
+    for (Map.Entry<String, Boolean> entry : stats.columnStats.entrySet()) {
+      if (entry.getValue()) {
+        colNames.add(entry.getKey());
+      }
+    }
+
+    return colNames;
+  }
+
   public static boolean canColumnStatsMerge(Map<String, String> params, String colName) {
     if (params == null) {
       return false;
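
The new getColumnsHavingStats() helper reads the COLUMN_STATS_ACCURATE
table/partition parameter, a JSON blob maintained elsewhere in this class. A
minimal Java usage sketch (the parameter value and table variable are
illustrative, not from this patch):

    // Suppose the table parameters contain, for example:
    //   COLUMN_STATS_ACCURATE = {"BASIC_STATS":"true","COLUMN_STATS":{"id":"true","name":"true"}}
    Map<String, String> params = tbl.getParameters();
    // Collects the column names flagged true ([id, name] here); per the
    // checks above, returns null when no parameters or stats are available.
    List<String> colsWithStats = StatsSetupConst.getColumnsHavingStats(params);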

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 3d4467d..59b5bcf 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1877,10 +1877,20 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   }
 
   @Override
+  public Table getTable(String dbname, String name, boolean getColumnStats) throws TException {
+    return getTable(getDefaultCatalog(conf), dbname, name, getColumnStats);
+  }
+
+  @Override
   public Table getTable(String catName, String dbName, String tableName) throws TException {
+    return getTable(catName, dbName, tableName, false);
+  }
+
+  public Table getTable(String catName, String dbName, String tableName, boolean getColumnStats) throws TException {
     GetTableRequest req = new GetTableRequest(dbName, tableName);
     req.setCatName(catName);
     req.setCapabilities(version);
+    req.setGetColumnStats(getColumnStats);
     Table t = client.get_table_req(req).getTable();
     return deepCopy(FilterUtils.filterTableIfEnabled(isClientFilterEnabled, filterHook, t));
   }
@@ -1888,10 +1898,17 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   @Override
   public Table getTable(String catName, String dbName, String tableName,
     String validWriteIdList) throws TException {
+    return getTable(catName, dbName, tableName, validWriteIdList, false);
+  }
+
+  @Override
+  public Table getTable(String catName, String dbName, String tableName, String validWriteIdList,
+                        boolean getColumnStats) throws TException {
     GetTableRequest req = new GetTableRequest(dbName, tableName);
     req.setCatName(catName);
     req.setCapabilities(version);
     req.setValidWriteIdList(validWriteIdList);
+    req.setGetColumnStats(getColumnStats);
     Table t = client.get_table_req(req).getTable();
     return deepCopy(FilterUtils.filterTableIfEnabled(isClientFilterEnabled, filterHook, t));
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 158740d..e10cc8c 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -724,6 +724,25 @@ public interface IMetaStoreClient {
       TException, NoSuchObjectException;
 
   /**
+   * Get a table object in the default catalog.
+   *
+   * @param dbName
+   *          The database the table is located in.
+   * @param tableName
+   *          Name of the table to fetch.
+   * @param getColumnStats
+   *          get the column stats, if available, when true
+   * @return An object representing the table.
+   * @throws MetaException
+   *           Could not fetch the table
+   * @throws TException
+   *           A thrift communication error occurred
+   * @throws NoSuchObjectException
+   *           In case the table wasn't found.
+   */
+  Table getTable(String dbName, String tableName, boolean getColumnStats) throws MetaException,
+          TException, NoSuchObjectException;
+  /**
    * Get a table object.
    * @param catName catalog the table is in.
    * @param dbName database the table is in.
@@ -734,8 +753,33 @@ public interface IMetaStoreClient {
    */
   Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
 
+  /**
+   * Get a table object.
+   * @param catName catalog the table is in.
+   * @param dbName database the table is in.
+   * @param tableName table name.
+   * @param validWriteIdList applicable snapshot
+   * @return table object.
+   * @throws MetaException Something went wrong, usually in the RDBMS.
+   * @throws TException general thrift error.
+   */
   Table getTable(String catName, String dbName, String tableName,
                         String validWriteIdList) throws TException;
+
+  /**
+   * Get a table object.
+   * @param catName catalog the table is in.
+   * @param dbName database the table is in.
+   * @param tableName table name.
+   * @param validWriteIdList applicable snapshot
+   * @param getColumnStats get the column stats, if available, when true
+   * @return table object.
+   * @throws MetaException Something went wrong, usually in the RDBMS.
+   * @throws TException general thrift error.
+   */
+  Table getTable(String catName, String dbName, String tableName,
+                 String validWriteIdList, boolean getColumnStats) throws TException;
+
   /**
    * Get tables as objects (rather than just fetching their names).  This is more expensive and
    * should only be used if you actually need all the information about the tables.
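
These overloads let a caller fetch a table and its column statistics in a
single metastore call rather than a separate getTableColumnStatistics()
round trip. An illustrative sketch, assuming msClient is a connected
IMetaStoreClient and the database/table names are placeholders:

    // Fetch the table plus its stats in one call (overload added above).
    Table t = msClient.getTable("default", "t1", true);
    if (t.isSetColStats()) {
      List<ColumnStatisticsObj> statsObjs = t.getColStats().getStatsObj();
    }
    // Passing false (or using the pre-existing overloads) leaves colStats
    // unset on the returned Table.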

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 8bbb9dd..c58015d 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -447,7 +447,8 @@ struct Table {
   18: optional string catName,          // Name of the catalog the table is in
   19: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
   20: optional i64 writeId=-1,
-  21: optional bool isStatsCompliant
+  21: optional bool isStatsCompliant,
+  22: optional ColumnStatistics colStats // column statistics for table
 }
 
 struct Partition {
@@ -1326,7 +1327,8 @@ struct GetTableRequest {
   2: required string tblName,
   3: optional ClientCapabilities capabilities,
   4: optional string catName,
-  6: optional string validWriteIdList
+  6: optional string validWriteIdList,
+  7: optional bool getColumnStats
 }
 
 struct GetTableResult {
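
At the wire level, the flow enabled by these two new optional fields looks
like the following sketch against the generated Java bindings (database and
table names are illustrative):

    GetTableRequest req = new GetTableRequest("default", "t1");
    req.setGetColumnStats(true);                    // new field 7
    Table t = client.get_table_req(req).getTable();
    ColumnStatistics stats = t.isSetColStats()      // new field 22
        ? t.getColStats() : null;

Because both fields are optional, clients and servers that predate this
change simply ignore fields 7 and 22 and remain wire-compatible.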

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index eb11e9f..c0b1d87 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1882,7 +1882,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     private void create_table_core(final RawStore ms, final Table tbl,
         final EnvironmentContext envContext)
             throws AlreadyExistsException, MetaException,
-            InvalidObjectException, NoSuchObjectException {
+            InvalidObjectException, NoSuchObjectException, InvalidInputException {
       create_table_core(ms, tbl, envContext, null, null, null, null, null, null);
     }
 
@@ -1892,7 +1892,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
                                    List<SQLCheckConstraint> checkConstraints)
         throws AlreadyExistsException, MetaException,
-        InvalidObjectException, NoSuchObjectException {
+        InvalidObjectException, NoSuchObjectException, InvalidInputException {
       // To preserve backward compatibility throw MetaException in case of null database
       if (tbl.getDbName() == null) {
         throw new MetaException("Null database name is not allowed");
@@ -2126,18 +2126,28 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
       }
+
+      // If the table has column statistics, update it into the metastore. This feature is used
+      // by replication to replicate table level statistics.
+      if (tbl.isSetColStats()) {
+        // We do not replicate statistics for a transactional table right now and hence we do not
+        // expect a transactional table to have column statistics here. So passing null
+        // validWriteIds is fine for now.
+        updateTableColumnStatsInternal(tbl.getColStats(), null, tbl.getWriteId());
+      }
     }
 
     @Override
     public void create_table(final Table tbl) throws AlreadyExistsException,
-        MetaException, InvalidObjectException {
+        MetaException, InvalidObjectException, InvalidInputException {
       create_table_with_environment_context(tbl, null);
     }
 
     @Override
     public void create_table_with_environment_context(final Table tbl,
         final EnvironmentContext envContext)
-        throws AlreadyExistsException, MetaException, InvalidObjectException {
+        throws AlreadyExistsException, MetaException, InvalidObjectException,
+            InvalidInputException {
       startFunction("create_table", ": " + tbl.toString());
       boolean success = false;
       Exception ex = null;
@@ -2148,7 +2158,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         LOG.warn("create_table_with_environment_context got ", e);
         ex = e;
         throw new InvalidObjectException(e.getMessage());
-      } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+      } catch (MetaException | InvalidObjectException | AlreadyExistsException | InvalidInputException e) {
         ex = e;
         throw e;
       } catch (Exception e) {
@@ -2166,7 +2176,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         List<SQLNotNullConstraint> notNullConstraints,
         List<SQLDefaultConstraint> defaultConstraints,
         List<SQLCheckConstraint> checkConstraints)
-        throws AlreadyExistsException, MetaException, InvalidObjectException {
+        throws AlreadyExistsException, MetaException, InvalidObjectException,
+            InvalidInputException {
       startFunction("create_table", ": " + tbl.toString());
       boolean success = false;
       Exception ex = null;
@@ -2177,7 +2188,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       } catch (NoSuchObjectException e) {
         ex = e;
         throw new InvalidObjectException(e.getMessage());
-      } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+      } catch (MetaException | InvalidObjectException | AlreadyExistsException |
+              InvalidInputException e) {
         ex = e;
         throw e;
       } catch (Exception e) {
@@ -2989,7 +3001,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         NoSuchObjectException {
       String[] parsedDbName = parseDbName(dbname, conf);
       return getTableInternal(
-            parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null);
+            parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null, false);
     }
 
     @Override
@@ -2997,11 +3009,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         NoSuchObjectException {
       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
       return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
-          req.getCapabilities(), req.getValidWriteIdList()));
+          req.getCapabilities(), req.getValidWriteIdList(), req.isGetColumnStats()));
     }
 
     private Table getTableInternal(String catName, String dbname, String name,
-        ClientCapabilities capabilities, String writeIdList)
+        ClientCapabilities capabilities, String writeIdList, boolean getColumnStats)
         throws MetaException, NoSuchObjectException {
       if (isInTest) {
         assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY,
@@ -3012,7 +3024,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       startTableFunction("get_table", catName, dbname, name);
       Exception ex = null;
       try {
-        t = get_table_core(catName, dbname, name, writeIdList);
+        t = get_table_core(catName, dbname, name, writeIdList, getColumnStats);
         if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
           assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
               "insert-only tables", "get_table_req");
@@ -3065,6 +3077,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final String name,
         final String writeIdList)
         throws MetaException, NoSuchObjectException {
+      return get_table_core(catName, dbname, name, writeIdList, false);
+    }
+
+    public Table get_table_core(final String catName,
+        final String dbname,
+        final String name,
+        final String writeIdList,
+        boolean getColumnStats)
+        throws MetaException, NoSuchObjectException {
       Table t = null;
       try {
         t = getMS().getTable(catName, dbname, name, writeIdList);
@@ -3072,6 +3093,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           throw new NoSuchObjectException(TableName.getQualified(catName, dbname, name) +
             " table not found");
         }
+
+        // If column statistics was requested and is valid fetch it.
+        if (getColumnStats) {
+          ColumnStatistics colStats = getMS().getTableColumnStatistics(catName, dbname, name,
+                  StatsSetupConst.getColumnsHavingStats(t.getParameters()), writeIdList);
+          if (colStats != null) {
+            t.setColStats(colStats);
+          }
+        }
       } catch (Exception e) {
         throwMetaException(e);
       }
@@ -5945,15 +5975,20 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         parameters = getMS().updateTableColumnStatistics(colStats, validWriteIds, writeId);
         if (parameters != null) {
+          Table tableObj = getMS().getTable(colStats.getStatsDesc().getCatName(),
+                                            colStats.getStatsDesc().getDbName(),
+                                            colStats.getStatsDesc().getTableName(), validWriteIds);
           if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
             MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
                     EventType.UPDATE_TABLE_COLUMN_STAT,
-                    new UpdateTableColumnStatEvent(colStats, parameters, validWriteIds, writeId, this));
+                    new UpdateTableColumnStatEvent(colStats, tableObj, parameters, validWriteIds,
+                            writeId, this));
           }
           if (!listeners.isEmpty()) {
             MetaStoreListenerNotifier.notifyEvent(listeners,
                     EventType.UPDATE_TABLE_COLUMN_STAT,
-                    new UpdateTableColumnStatEvent(colStats, parameters, validWriteIds, writeId, this));
+                    new UpdateTableColumnStatEvent(colStats, tableObj, parameters, validWriteIds,
+                            writeId,this));
           }
         }
         committed = getMS().commitTransaction();
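
Taken together, the server-side changes support a replication flow along
these lines (a sketch, not code from this patch; the client variables and
names are hypothetical):

    // Dump side: read the source table together with its column stats.
    Table t = srcClient.getTable("default", "t1", true);
    // Load side: the stats ride along inside the Table object;
    // create_table_core() above persists them via
    // updateTableColumnStatsInternal() when isSetColStats() is true.
    dstClient.createTable(t);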

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 65fea87..9f72124 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1246,8 +1246,7 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public Table getTable(String catName, String dbName, String tableName,
-                        String writeIdList)
+  public Table getTable(String catName, String dbName, String tableName, String writeIdList)
       throws MetaException {
     boolean commited = false;
     Table tbl = null;
@@ -1287,6 +1286,7 @@ public class ObjectStore implements RawStore, Configurable {
         rollbackTransaction();
       }
     }
+
     return tbl;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 209e92a..03a116a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -214,7 +214,6 @@ public interface RawStore extends Configurable {
    * @param catalogName catalog the table is in.
    * @param dbName database the table is in.
    * @param tableName table name.
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return table object, or null if no such table exists (wow it would be nice if we either
    * consistently returned null or consistently threw NoSuchObjectException).

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index bb504b0..7ad4bd2 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -1161,6 +1161,7 @@ public class CachedStore implements RawStore, Configurable {
       return rawStore.getTable(catName, dbName, tblName, validWriteIds);
     }
     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+
     if (tbl == null) {
       // This table is not yet loaded in cache
       // If the prewarm thread is working on this table's database,

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java
index cf23617..3f988bb 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.Table;
 
 import java.util.List;
 import java.util.Map;
@@ -37,6 +38,7 @@ public class UpdateTableColumnStatEvent extends ListenerEvent {
   private String validWriteIds;
   private long writeId;
   private Map<String, String> parameters;
+  private Table tableObj;
 
   /**
    * @param colStats Columns statistics Info.
@@ -45,13 +47,15 @@ public class UpdateTableColumnStatEvent extends ListenerEvent {
    * @param colStats writeId for the query.
    * @param handler handler that is firing the event
    */
-  public UpdateTableColumnStatEvent(ColumnStatistics colStats, Map<String, String> parameters, String validWriteIds,
+  public UpdateTableColumnStatEvent(ColumnStatistics colStats, Table tableObj,
+                                    Map<String, String> parameters, String validWriteIds,
                                     long writeId, IHMSHandler handler) {
     super(true, handler);
     this.colStats = colStats;
     this.validWriteIds = validWriteIds;
     this.writeId = writeId;
     this.parameters = parameters;
+    this.tableObj = tableObj;
   }
 
   /**
@@ -64,6 +68,7 @@ public class UpdateTableColumnStatEvent extends ListenerEvent {
     this.validWriteIds = null;
     this.writeId = 0;
     this.parameters = null;
+    this.tableObj = null;
   }
 
   public ColumnStatistics getColStats() {
@@ -81,4 +86,8 @@ public class UpdateTableColumnStatEvent extends ListenerEvent {
   public Map<String, String> getTableParameters() {
     return parameters;
   }
+
+  public Table getTableObj() {
+    return tableObj;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
index 6add6c2..e3a91f9 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
@@ -287,10 +287,11 @@ public class MessageBuilder {
   }
 
   public JSONUpdateTableColumnStatMessage buildUpdateTableColumnStatMessage(ColumnStatistics colStats,
+                                                                            Table tableObj,
                                                                             Map<String, String> parameters,
                                                                             String validWriteIds, long writeId) {
-    return new JSONUpdateTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), colStats, parameters,
-            validWriteIds, writeId);
+    return new JSONUpdateTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(),
+            colStats, tableObj, parameters, validWriteIds, writeId);
   }
 
   public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String dbName, String colName) {

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java
index ed8944b..7919b0e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hive.metastore.messaging;
 
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.Table;
+
 import java.util.Map;
 
 /**
@@ -36,4 +38,6 @@ public abstract class UpdateTableColumnStatMessage extends EventMessage {
   public abstract Long getWriteId();
 
   public abstract Map<String, String> getParameters();
+
+  public abstract Table getTableObject() throws Exception;
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java
index d99ef11..c932b7c 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.metastore.messaging.json;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
 import org.apache.hadoop.hive.metastore.messaging.UpdateTableColumnStatMessage;
 import org.apache.thrift.TException;
@@ -43,6 +44,9 @@ public class JSONUpdateTableColumnStatMessage extends UpdateTableColumnStatMessa
   @JsonProperty
   Map<String, String> parameters;
 
+  @JsonProperty
+  private String tableObjJson;
+
   /**
    * Default constructor, needed for Jackson.
    */
@@ -50,7 +54,8 @@ public class JSONUpdateTableColumnStatMessage extends UpdateTableColumnStatMessa
   }
 
   public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, Long timestamp,
-                      ColumnStatistics colStats, Map<String, String> parameters, String validWriteIds, long writeId) {
+                      ColumnStatistics colStats, Table tableObj, Map<String, String> parameters,
+                                          String validWriteIds, long writeId) {
     this.timestamp = timestamp;
     this.server = server;
     this.servicePrincipal = servicePrincipal;
@@ -59,6 +64,7 @@ public class JSONUpdateTableColumnStatMessage extends UpdateTableColumnStatMessa
     this.database = colStats.getStatsDesc().getDbName();
     try {
       this.colStatsJson = MessageBuilder.createTableColumnStatJson(colStats);
+      this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
     } catch (TException e) {
       throw new IllegalArgumentException("Could not serialize JSONUpdateTableColumnStatMessage : ", e);
     }
@@ -95,6 +101,11 @@ public class JSONUpdateTableColumnStatMessage extends UpdateTableColumnStatMessa
   }
 
   @Override
+  public Table getTableObject() throws Exception {
+    return (Table) MessageBuilder.getTObj(tableObjJson, Table.class);
+  }
+
+  @Override
   public String getValidWriteIds() {
     return validWriteIds;
   }
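
On the consumer side, a listener handling an UPDATE_TABLE_COLUMN_STAT event
can now recover the full table object from the message. A sketch, assuming
msg is an already deserialized UpdateTableColumnStatMessage:

    // Both accessors are part of the message API shown above; note that
    // getTableObject() declares throws Exception (JSON deserialization).
    Table tableObj = msg.getTableObject();
    Map<String, String> params = msg.getParameters();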

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 898a94d..aafdef9 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -3102,12 +3102,24 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   }
 
   @Override
+  public Table getTable(String catName, String dbName, boolean getColumnStats) throws MetaException,
+          TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public Table getTable(String catName, String dbName, String tableName,
                         String validWriteIdList) throws TException {
     throw new UnsupportedOperationException();
   }
 
   @Override
+  public Table getTable(String catName, String dbName, String tableName,
+                        String validWriteIdList, boolean getColumnStats) throws TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public List<Table> getTableObjectsByName(String catName, String dbName,
                                            List<String> tableNames) throws MetaException,
       InvalidOperationException, UnknownDBException, TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
index 1b01432..5ba768f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Date;
 import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -214,6 +215,19 @@ public class TestStats {
         client.getTableColumnStatistics(dbName, tableName, new ArrayList<>(colMap.keySet())) :
         client.getTableColumnStatistics(catName, dbName, tableName, new ArrayList<>(colMap.keySet()));
     compareStatsForOneTableOrPartition(objs, 0, colMap);
+
+    // Test the statistics obtained through the getTable call.
+    Table table = catName.equals(NO_CAT) ?
+            client.getTable(dbName, tableName, true) :
+            client.getTable(catName, dbName, tableName, null, true);
+    Assert.assertTrue(table.isSetColStats());
+    compareStatsForOneTableOrPartition(table.getColStats().getStatsObj(), 0, colMap);
+
+    // Test that the getTable call doesn't fetch the statistics when not explicitly requested.
+    table = catName.equals(NO_CAT) ?
+            client.getTable(dbName, tableName, false) :
+            client.getTable(catName, dbName, tableName, null, false);
+    Assert.assertFalse(table.isSetColStats());
   }
 
   private void compareStatsForPartitions(String catName, String dbName, String tableName,
@@ -335,7 +349,13 @@ public class TestStats {
     }
 
     abstract ColumnStatisticsObj generate();
-    abstract void compare(ColumnStatisticsObj obj, int offset);
+    abstract void compare(ColumnStatisticsData colstats, int offset);
+
+    void compare(ColumnStatisticsObj obj, int offset) {
+      compareCommon(obj);
+      compare(obj.getStatsData(), offset);
+    }
+
     abstract void compareAggr(ColumnStatisticsObj obj);
 
     void compareCommon(ColumnStatisticsObj obj) {
@@ -343,6 +363,11 @@ public class TestStats {
       Assert.assertEquals(colType, obj.getColType());
     }
 
+    void compareCommon(FieldSchema col) {
+      Assert.assertEquals(colName, col.getName());
+      Assert.assertEquals(colType, col.getType());
+    }
+
     long genMaxLen() {
       return genPositiveLong(maxLens);
     }
@@ -429,12 +454,11 @@ public class TestStats {
     }
 
     @Override
-    void compare(ColumnStatisticsObj obj, int offset) {
-      compareCommon(obj);
+    void compare(ColumnStatisticsData colstats, int offset) {
       Assert.assertEquals("binary max length", maxLens.get(offset),
-          (Long)obj.getStatsData().getBinaryStats().getMaxColLen());
-      Assert.assertEquals("binary min length", avgLens.get(offset), obj.getStatsData().getBinaryStats().getAvgColLen(), 0.01);
-      Assert.assertEquals("binary num nulls", numNulls.get(offset), (Long)obj.getStatsData().getBinaryStats().getNumNulls());
+          (Long) colstats.getBinaryStats().getMaxColLen());
+      Assert.assertEquals("binary min length", avgLens.get(offset), colstats.getBinaryStats().getAvgColLen(), 0.01);
+      Assert.assertEquals("binary num nulls", numNulls.get(offset), (Long) colstats.getBinaryStats().getNumNulls());
     }
 
     @Override
@@ -465,11 +489,10 @@ public class TestStats {
     }
 
     @Override
-    void compare(ColumnStatisticsObj obj, int offset) {
-      compareCommon(obj);
-      Assert.assertEquals("boolean num trues", numTrues.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumTrues());
-      Assert.assertEquals("boolean num falses", numFalses.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumFalses());
-      Assert.assertEquals("boolean num nulls", numNulls.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumNulls());
+    void compare(ColumnStatisticsData colstats, int offset) {
+      Assert.assertEquals("boolean num trues", numTrues.get(offset), (Long) colstats.getBooleanStats().getNumTrues());
+      Assert.assertEquals("boolean num falses", numFalses.get(offset), (Long) colstats.getBooleanStats().getNumFalses());
+      Assert.assertEquals("boolean num nulls", numNulls.get(offset), (Long) colstats.getBooleanStats().getNumNulls());
     }
 
     @Override
@@ -517,12 +540,11 @@ public class TestStats {
     }
 
     @Override
-    void compare(ColumnStatisticsObj obj, int offset) {
-      compareCommon(obj);
-      Assert.assertEquals("date num nulls", numNulls.get(offset), (Long)obj.getStatsData().getDateStats().getNumNulls());
-      Assert.assertEquals("date num dvs", numDvs.get(offset), (Long)obj.getStatsData().getDateStats().getNumDVs());
-      Assert.assertEquals("date low val", lowVals.get(offset), obj.getStatsData().getDateStats().getLowValue());
-      Assert.assertEquals("date high val", highVals.get(offset), obj.getStatsData().getDateStats().getHighValue());
+    void compare(ColumnStatisticsData colstats, int offset) {
+      Assert.assertEquals("date num nulls", numNulls.get(offset), (Long) colstats.getDateStats().getNumNulls());
+      Assert.assertEquals("date num dvs", numDvs.get(offset), (Long) colstats.getDateStats().getNumDVs());
+      Assert.assertEquals("date low val", lowVals.get(offset), colstats.getDateStats().getLowValue());
+      Assert.assertEquals("date high val", highVals.get(offset), colstats.getDateStats().getHighValue());
     }
 
     @Override
@@ -579,16 +601,15 @@ public class TestStats {
     }
 
     @Override
-    void compare(ColumnStatisticsObj obj, int offset) {
-      compareCommon(obj);
+    void compare(ColumnStatisticsData colstats, int offset) {
       Assert.assertEquals("double num nulls", numNulls.get(offset),
-          (Long)obj.getStatsData().getDoubleStats().getNumNulls());
+          (Long) colstats.getDoubleStats().getNumNulls());
       Assert.assertEquals("double num dvs", numDvs.get(offset),
-          (Long)obj.getStatsData().getDoubleStats().getNumDVs());
+          (Long) colstats.getDoubleStats().getNumDVs());
       Assert.assertEquals("double low val", lowVals.get(offset),
-          obj.getStatsData().getDoubleStats().getLowValue(), 0.01);
+          colstats.getDoubleStats().getLowValue(), 0.01);
       Assert.assertEquals("double high val", highVals.get(offset),
-          obj.getStatsData().getDoubleStats().getHighValue(), 0.01);
+          colstats.getDoubleStats().getHighValue(), 0.01);
     }
 
     @Override
@@ -644,16 +665,15 @@ public class TestStats {
     }
 
     @Override
-    void compare(ColumnStatisticsObj obj, int offset) {
-      compareCommon(obj);
+    void compare(ColumnStatisticsData colstats, int offset) {
       Assert.assertEquals("long num nulls", numNulls.get(offset),
-          (Long)obj.getStatsData().getLongStats().getNumNulls());
+          (Long) colstats.getLongStats().getNumNulls());
       Assert.assertEquals("long num dvs", numDvs.get(offset),
-          (Long)obj.getStatsData().getLongStats().getNumDVs());
+          (Long) colstats.getLongStats().getNumDVs());
       Assert.assertEquals("long low val", (long)lowVals.get(offset),
-          obj.getStatsData().getLongStats().getLowValue());
+          colstats.getLongStats().getLowValue());
       Assert.assertEquals("long high val", (long)highVals.get(offset),
-          obj.getStatsData().getLongStats().getHighValue());
+          colstats.getLongStats().getHighValue());
     }
 
     @Override
@@ -703,16 +723,15 @@ public class TestStats {
     }
 
     @Override
-    void compare(ColumnStatisticsObj obj, int offset) {
-      compareCommon(obj);
+    void compare(ColumnStatisticsData colstats, int offset) {
       Assert.assertEquals("str num nulls", numNulls.get(offset),
-          (Long)obj.getStatsData().getStringStats().getNumNulls());
+          (Long) colstats.getStringStats().getNumNulls());
       Assert.assertEquals("str num dvs", numDvs.get(offset),
-          (Long)obj.getStatsData().getStringStats().getNumDVs());
+          (Long) colstats.getStringStats().getNumDVs());
       Assert.assertEquals("str low val", (long)maxLens.get(offset),
-          obj.getStatsData().getStringStats().getMaxColLen());
+          colstats.getStringStats().getMaxColLen());
       Assert.assertEquals("str high val", avgLens.get(offset),
-          obj.getStatsData().getStringStats().getAvgColLen(), 0.01);
+          colstats.getStringStats().getAvgColLen(), 0.01);
     }
 
     @Override
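
The compare() change above is a small template-method refactoring: the shared
ColumnStatisticsObj handling becomes a concrete method, and each per-type checker now
implements only the comparison against the type-specific ColumnStatisticsData. A
condensed sketch of the pattern (test scaffolding omitted):

    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

    abstract class StatChecker {
      // Subclasses (binary, boolean, date, double, long, string) implement
      // only the type-specific comparison ...
      abstract void compare(ColumnStatisticsData colstats, int offset);

      // ... while the common column name/type assertions run once, here.
      void compare(ColumnStatisticsObj obj, int offset) {
        compareCommon(obj);
        compare(obj.getStatsData(), offset);
      }

      void compareCommon(ColumnStatisticsObj obj) {
        // assert column name and column type match the expectation
      }
    }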


[2/2] hive git commit: HIVE-21078: Replicate column and table level statistics for unpartitioned Hive tables (Ashutosh Bapat, reviewed by Sankar Hariappan)

Posted by sa...@apache.org.
HIVE-21078: Replicate column and table level statistics for unpartitioned Hive tables (Ashutosh Bapat, reviewed by Sankar Hariappan)

Signed-off-by: Sankar Hariappan <sa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ffca04a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ffca04a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ffca04a

Branch: refs/heads/master
Commit: 2ffca04a8b58979b4995a5a6eb264f8f59d9b425
Parents: eba9646
Author: Ashutosh Bapat <ab...@cloudera.com>
Authored: Wed Jan 23 16:21:04 2019 +0530
Committer: Sankar Hariappan <sa...@apache.org>
Committed: Wed Jan 23 16:24:28 2019 +0530

----------------------------------------------------------------------
 .../listener/DbNotificationListener.java        |   1 +
 .../hive/ql/parse/TestReplicationScenarios.java |   1 +
 .../TestReplicationScenariosAcidTables.java     |   1 +
 ...TestReplicationScenariosAcrossInstances.java |   1 +
 .../ql/parse/TestStatsReplicationScenarios.java | 306 +++++++++++++++++++
 ...stStatsReplicationScenariosNoAutogather.java |  55 ++++
 .../hadoop/hive/ql/parse/WarehouseInstance.java |  41 +++
 .../hive/ql/exec/ColumnStatsUpdateTask.java     |  32 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  13 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |  18 +-
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java  |   6 +-
 .../events/filesystem/FSTableEvent.java         |   5 +
 .../repl/bootstrap/load/table/LoadTable.java    |   1 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  41 ++-
 .../ql/metadata/SessionHiveMetaStoreClient.java |  22 +-
 .../apache/hadoop/hive/ql/metadata/Table.java   |  27 ++
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |  11 +-
 .../hive/ql/parse/repl/dump/HiveWrapper.java    |   8 +-
 .../hive/ql/parse/repl/dump/TableExport.java    |   6 +-
 .../repl/dump/events/AlterTableHandler.java     |   9 +
 .../repl/dump/events/CreateTableHandler.java    |   8 +
 .../dump/events/UpdateTableColStatHandler.java  |  21 +-
 .../load/message/UpdateTableColStatHandler.java |  37 ++-
 .../hive/ql/plan/ColumnStatsUpdateWork.java     |  15 +
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |  40 ++-
 .../hadoop/hive/ql/plan/ImportTableDesc.java    |   3 +-
 .../apache/hadoop/hive/ql/plan/MoveWork.java    |   9 +
 .../hadoop/hive/ql/exec/TestExecDriver.java     |   2 +-
 .../hive/metastore/api/GetTableRequest.java     | 111 ++++++-
 .../apache/hadoop/hive/metastore/api/Table.java | 116 ++++++-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  51 ++++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  31 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   8 +-
 .../hadoop/hive/common/StatsSetupConst.java     |  29 ++
 .../hive/metastore/HiveMetaStoreClient.java     |  17 ++
 .../hadoop/hive/metastore/IMetaStoreClient.java |  44 +++
 .../src/main/thrift/hive_metastore.thrift       |   6 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |  61 +++-
 .../hadoop/hive/metastore/ObjectStore.java      |   4 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |   1 -
 .../hive/metastore/cache/CachedStore.java       |   1 +
 .../events/UpdateTableColumnStatEvent.java      |  11 +-
 .../metastore/messaging/MessageBuilder.java     |   5 +-
 .../messaging/UpdateTableColumnStatMessage.java |   4 +
 .../json/JSONUpdateTableColumnStatMessage.java  |  13 +-
 .../HiveMetaStoreClientPreCatalog.java          |  12 +
 .../apache/hadoop/hive/metastore/TestStats.java |  89 +++---
 47 files changed, 1236 insertions(+), 118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index fa7ab25..81b35a4 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -757,6 +757,7 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener
   public void onUpdateTableColumnStat(UpdateTableColumnStatEvent updateTableColumnStatEvent) throws MetaException {
     UpdateTableColumnStatMessage msg = MessageBuilder.getInstance()
             .buildUpdateTableColumnStatMessage(updateTableColumnStatEvent.getColStats(),
+                    updateTableColumnStatEvent.getTableObj(),
                     updateTableColumnStatEvent.getTableParameters(),
                     updateTableColumnStatEvent.getValidWriteIds(), updateTableColumnStatEvent.getWriteId());
     NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_TABLE_COLUMN_STAT.toString(),

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index c85a2a4..6e9c443 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -570,6 +570,7 @@ public class TestReplicationScenarios {
       @Nullable
       @Override
       public Table apply(@Nullable Table table) {
+        LOG.info("Performing injection on table " + table.getTableName());
         if (table.getTableName().equalsIgnoreCase("ptned")){
           injectionPathCalled = true;
           return null;

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index 4472a61..342985e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -66,6 +66,7 @@ import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLI
  * TestReplicationScenariosAcidTables - test replication for ACID tables
  */
 public class TestReplicationScenariosAcidTables {
+
   @Rule
   public final TestName testName = new TestName();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 0df99b3..1adec4e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -305,6 +305,7 @@ public class TestReplicationScenariosAcrossInstances extends BaseReplicationAcro
     WarehouseInstance.Tuple tuple = primary
         .run("use " + primaryDbName)
         .run("create table t1 (id int)")
+        .run("insert into t1 values (1), (2)")
         .run("create table t2 (place string) partitioned by (country string)")
         .run("insert into table t2 partition(country='india') values ('bangalore')")
         .run("insert into table t2 partition(country='us') values ('austin')")

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
new file mode 100644
index 0000000..8815a13
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
+import org.apache.hadoop.hive.ql.parse.repl.PathBuilder;
+import org.apache.hadoop.hive.shims.Utils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+
+/**
+ * Tests for statistics replication.
+ */
+public class TestStatsReplicationScenarios {
+  @Rule
+  public final TestName testName = new TestName();
+
+  protected static final Logger LOG = LoggerFactory.getLogger(TestStatsReplicationScenarios.class);
+  static WarehouseInstance primary;
+  private static WarehouseInstance replica;
+  private String primaryDbName, replicatedDbName;
+  private static HiveConf conf;
+  private static boolean hasAutogather;
+
+  @BeforeClass
+  public static void classLevelSetup() throws Exception {
+    Map<String, String> overrides = new HashMap<>();
+    overrides.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
+        GzipJSONMessageEncoder.class.getCanonicalName());
+
+    internalBeforeClassSetup(overrides, TestReplicationScenarios.class, true);
+  }
+
+  static void internalBeforeClassSetup(Map<String, String> overrides, Class clazz,
+                                       boolean autogather)
+      throws Exception {
+    conf = new HiveConf(clazz);
+    conf.set("dfs.client.use.datanode.hostname", "true");
+    conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
+    MiniDFSCluster miniDFSCluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
+    Map<String, String> localOverrides = new HashMap<String, String>() {{
+        put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
+        put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+      }};
+    localOverrides.putAll(overrides);
+    replica = new WarehouseInstance(LOG, miniDFSCluster, localOverrides);
+
+    // Configure stats autogather on the primary as requested.
+    hasAutogather = autogather;
+    localOverrides.put(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname,
+                        autogather ? "true" : "false");
+    primary = new WarehouseInstance(LOG, miniDFSCluster, localOverrides);
+  }
+
+  @AfterClass
+  public static void classLevelTearDown() throws IOException {
+    primary.close();
+    replica.close();
+  }
+
+  @Before
+  public void setup() throws Throwable {
+    primaryDbName = testName.getMethodName() + "_" + System.currentTimeMillis();
+    replicatedDbName = "replicated_" + primaryDbName;
+    primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" +
+            SOURCE_OF_REPLICATION + "' = '1,2,3')");
+  }
+
+  @After
+  public void tearDown() throws Throwable {
+    primary.run("drop database if exists " + primaryDbName + " cascade");
+    replica.run("drop database if exists " + replicatedDbName + " cascade");
+  }
+
+
+  private Map<String, String> collectStatsParams(Map<String, String> allParams) {
+    Map<String, String> statsParams = new HashMap<String, String>();
+    List<String> params = new ArrayList<>(StatsSetupConst.SUPPORTED_STATS);
+    params.add(StatsSetupConst.COLUMN_STATS_ACCURATE);
+    for (String param : params) {
+      String value = allParams.get(param);
+      if (value != null) {
+        statsParams.put(param, value);
+      }
+    }
+
+    return statsParams;
+  }
+
+  private void verifyReplicatedStatsForTable(String tableName) throws Exception {
+    // Test column stats
+    Assert.assertEquals(primary.getTableColumnStatistics(primaryDbName, tableName),
+                        replica.getTableColumnStatistics(replicatedDbName, tableName));
+
+    // Test table level stats
+    Map<String, String> rParams =
+            collectStatsParams(replica.getTable(replicatedDbName, tableName).getParameters());
+    Map<String, String> pParams =
+            collectStatsParams(primary.getTable(primaryDbName, tableName).getParameters());
+    Assert.assertEquals(pParams, rParams);
+  }
+
+  private void verifyNoStatsReplicationForMetadataOnly(String tableName) throws Throwable {
+    // Test column stats
+    Assert.assertTrue(replica.getTableColumnStatistics(replicatedDbName, tableName).isEmpty());
+
+    // When no data is replicated, the basic stats parameters for the table should look as if
+    // it's a new table created on the replica. Based on the create table rules the basic stats
+    // may be true or false; either is fine with us, so don't bother checking exact values.
+    Map<String, String> rParams =
+            collectStatsParams(replica.getTable(replicatedDbName, tableName).getParameters());
+    List<String> params = new ArrayList<>(StatsSetupConst.SUPPORTED_STATS);
+    Map<String, String> expectedFalseParams = new HashMap<>();
+    Map<String, String> expectedTrueParams = new HashMap<>();
+    StatsSetupConst.setStatsStateForCreateTable(expectedTrueParams,
+            replica.getTableColNames(replicatedDbName, tableName), StatsSetupConst.TRUE);
+    StatsSetupConst.setStatsStateForCreateTable(expectedFalseParams,
+            replica.getTableColNames(replicatedDbName, tableName), StatsSetupConst.FALSE);
+    Assert.assertTrue(rParams.equals(expectedFalseParams) || rParams.equals(expectedTrueParams));
+  }
+
+  private List<String> createBootStrapData() throws Throwable {
+    String simpleTableName = "sTable";
+    String partTableName = "pTable";
+    String ndTableName = "ndTable";
+
+    primary.run("use " + primaryDbName)
+            .run("create table " + simpleTableName + " (id int)")
+            .run("insert into " + simpleTableName + " values (1), (2)")
+            .run("create table " + partTableName + " (place string) partitioned by (country string)")
+            .run("insert into table " + partTableName + " partition(country='india') values ('bangalore')")
+            .run("insert into table " + partTableName + " partition(country='us') values ('austin')")
+            .run("insert into table " + partTableName + " partition(country='france') values ('paris')")
+            .run("create table " + ndTableName + " (str string)");
+
+    List<String> tableNames = new ArrayList<String>(Arrays.asList(simpleTableName, partTableName,
+            ndTableName));
+
+    // Run analyze on each of the tables, if stats are not being gathered automatically.
+    if (!hasAutogather) {
+      for (String name : tableNames) {
+        Assert.assertTrue(primary.getTableColumnStatistics(primaryDbName, name).isEmpty());
+        primary.run("use " + primaryDbName)
+                .run("analyze table " + name + " compute statistics for columns");
+      }
+    }
+
+    return tableNames;
+  }
+
+  /**
+   * Dumps primaryDbName on primary, loads it on replica as replicatedDbName, and verifies that
+   * the statistics loaded are the same as the ones on primary.
+   * @param tableNames names of tables on primary expected to be loaded
+   * @param lastReplicationId replication id of the last dump, for incremental dump/load
+   * @param parallelLoad if true, parallel bootstrap load is used
+   * @param metadataOnly if true, only metadata is dumped and loaded
+   * @return lastReplicationId of the dump performed.
+   */
+  private String dumpLoadVerify(List<String> tableNames, String lastReplicationId,
+                                boolean parallelLoad, boolean metadataOnly)
+          throws Throwable {
+    List<String> withClauseList;
+    // Parallel load works only for bootstrap.
+    parallelLoad = parallelLoad && (lastReplicationId == null);
+
+    // With clause construction for REPL DUMP command.
+    if (metadataOnly) {
+      withClauseList = Collections.singletonList("'hive.repl.dump.metadata.only'='true'");
+    } else {
+      withClauseList = Collections.emptyList();
+    }
+
+    // Take dump
+    WarehouseInstance.Tuple dumpTuple = primary.run("use " + primaryDbName)
+            .dump(primaryDbName, lastReplicationId, withClauseList);
+
+    // Load, if necessary changing configuration.
+    if (parallelLoad) {
+      replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true);
+    }
+
+    replica.load(replicatedDbName, dumpTuple.dumpLocation)
+            .run("use " + replicatedDbName)
+            .run("show tables")
+            .verifyResults(tableNames.toArray(new String[0]));
+
+    // Metadata load may not load all the events.
+    if (!metadataOnly) {
+      replica.run("repl status " + replicatedDbName)
+              .verifyResult(dumpTuple.lastReplicationId);
+    }
+
+    if (parallelLoad) {
+      replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, false);
+    }
+
+    // Test statistics
+    for (String name : tableNames) {
+      if (metadataOnly) {
+        verifyNoStatsReplicationForMetadataOnly(name);
+      } else {
+        verifyReplicatedStatsForTable(name);
+      }
+    }
+
+    return dumpTuple.lastReplicationId;
+  }
+
+  private void createIncrementalData(List<String> tableNames) throws Throwable {
+    String simpleTableName = "sTable";
+    String partTableName = "pTable";
+    String ndTableName = "ndTable";
+
+    Assert.assertTrue(tableNames.containsAll(Arrays.asList(simpleTableName, partTableName,
+                                                         ndTableName)));
+    String incTableName = "iTable"; // New table
+
+    primary.run("use " + primaryDbName)
+            .run("insert into " + simpleTableName + " values (3), (4)")
+            // new data inserted into table
+            .run("insert into " + ndTableName + " values ('string1'), ('string2')")
+            // two partitions changed and one unchanged
+            .run("insert into table " + partTableName + " values ('india', 'pune')")
+            .run("insert into table " + partTableName + " values ('us', 'chicago')")
+            // new partition
+            .run("insert into table " + partTableName + " values ('australia', 'perth')")
+            .run("create table " + incTableName + " (config string, enabled boolean)")
+            .run("insert into " + incTableName + " values ('conf1', true)")
+            .run("insert into " + incTableName + " values ('conf2', false)");
+    tableNames.add(incTableName);
+
+    // Run analyze on each of the tables, if stats are not being gathered automatically.
+    if (!hasAutogather) {
+      for (String name : tableNames) {
+        primary.run("use " + primaryDbName)
+                .run("analyze table " + name + " compute statistics for columns");
+      }
+    }
+
+  }
+
+  public void testStatsReplicationCommon(boolean parallelBootstrap, boolean metadataOnly) throws Throwable {
+    List<String> tableNames = createBootStrapData();
+    String lastReplicationId = dumpLoadVerify(tableNames, null, parallelBootstrap,
+            metadataOnly);
+
+    // Incremental dump
+    createIncrementalData(tableNames);
+    lastReplicationId = dumpLoadVerify(tableNames, lastReplicationId, parallelBootstrap,
+            metadataOnly);
+  }
+
+  @Test
+  public void testForNonAcidTables() throws Throwable {
+    testStatsReplicationCommon(false, false);
+  }
+
+  @Test
+  public void testForNonAcidTablesParallelBootstrapLoad() throws Throwable {
+    testStatsReplicationCommon(true, false);
+  }
+
+  @Test
+  public void testNonAcidMetadataOnlyDump() throws Throwable {
+    testStatsReplicationCommon(false, true);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
new file mode 100644
index 0000000..f58ddb8
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.junit.Assert;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Tests statistics replication when statistics are collected using ANALYZE command.
+ */
+public class TestStatsReplicationScenariosNoAutogather extends TestStatsReplicationScenarios {
+  @Rule
+  public final TestName testName = new TestName();
+
+  protected static final Logger LOG = LoggerFactory.getLogger(TestStatsReplicationScenariosNoAutogather.class);
+  static WarehouseInstance primary;
+  private static WarehouseInstance replica;
+  private String primaryDbName, replicatedDbName;
+  private static HiveConf conf;
+
+  @BeforeClass
+  public static void classLevelSetup() throws Exception {
+    Map<String, String> overrides = new HashMap<>();
+    overrides.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
+        GzipJSONMessageEncoder.class.getCanonicalName());
+
+    internalBeforeClassSetup(overrides, TestReplicationScenarios.class, false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index bf4154c..b272f06 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -380,6 +381,46 @@ public class WarehouseInstance implements Closeable {
     }
   }
 
+  /**
+   * Get statistics for all columns of a given table in the given database.
+   * @param dbName - the database where the table resides
+   * @param tableName - name of the table whose statistics are to be retrieved
+   * @return - list of ColumnStatisticsObj objects, in the order of the table's columns
+   */
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName) throws Exception {
+    return client.getTableColumnStatistics(dbName, tableName, getTableColNames(dbName, tableName));
+  }
+
+  /**
+   * @param dbName database name
+   * @param tableName table name
+   * @return - list of column names of the given table in the given database.
+   * @throws Exception
+   */
+  public List<String> getTableColNames(String dbName, String tableName) throws Exception {
+    List<String> colNames = new ArrayList<>();
+    client.getSchema(dbName, tableName).forEach(fs -> colNames.add(fs.getName()));
+    return colNames;
+  }
+
+  /**
+   * Get statistics for given set of columns for all the partitions of a given table in the given
+   * database.
+   * @param dbName - the database where the table resides
+   * @param tableName - name of the partitioned table in the database
+   * @param colNames - columns whose statistics are to be retrieved
+   * @return Map of partition name and list of ColumnStatisticsObj. The objects in the list are
+   * ordered according to the given list of columns.
+   * @throws Exception
+   */
+  Map<String, List<ColumnStatisticsObj>> getAllPartitionColumnStatistics(String dbName,
+                                                                         String tableName,
+                                                                         List<String> colNames)
+          throws Exception {
+    return client.getPartitionColumnStatistics(dbName, tableName,
+            client.listPartitionNames(dbName, tableName, (short) -1), colNames);
+  }
+
   public List<Partition> getAllPartitions(String dbName, String tableName) throws Exception {
     try {
       return client.listPartitions(dbName, tableName, Short.MAX_VALUE);

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index 61fb3d3..cf00d7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -21,11 +21,11 @@ package org.apache.hadoop.hive.ql.exec;
 import java.io.IOException;
 import java.math.BigDecimal;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.Collections;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
@@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -77,6 +78,14 @@ public class ColumnStatsUpdateTask extends Task<ColumnStatsUpdateWork> {
   private ColumnStatistics constructColumnStatsFromInput()
       throws SemanticException, MetaException {
 
+    // If we are replicating the stats, we don't need to construct them again.
+    if (work.getColStats() != null) {
+      ColumnStatistics colStats = work.getColStats();
+      LOG.debug("Got stats through replication for " +
+              colStats.getStatsDesc().getDbName() + "." +
+              colStats.getStatsDesc().getTableName());
+      return colStats;
+    }
     String dbName = work.dbName();
     String tableName = work.getTableName();
     String partName = work.getPartName();
@@ -287,9 +296,22 @@ public class ColumnStatsUpdateTask extends Task<ColumnStatsUpdateWork> {
   }
 
   private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException {
-    List<ColumnStatistics> colStats = new ArrayList<>();
-    colStats.add(constructColumnStatsFromInput());
-    SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
+    ColumnStatistics colStats = constructColumnStatsFromInput();
+    ColumnStatisticsDesc colStatsDesc = colStats.getStatsDesc();
+    // We do not support stats replication for a transactional table yet. If we are converting
+    // a non-transactional table to a transactional table during replication, we might get
+    // column statistics but we shouldn't update them.
+    if (work.getColStats() != null &&
+        AcidUtils.isTransactionalTable(getHive().getTable(colStatsDesc.getDbName(),
+                                                          colStatsDesc.getTableName()))) {
+      LOG.debug("Skipped updating column stats for table " +
+                TableName.getDbTable(colStatsDesc.getDbName(), colStatsDesc.getTableName()) +
+                " because it is converted to a transactional table during replication.");
+      return 0;
+    }
+
+    SetPartitionsStatsRequest request =
+            new SetPartitionsStatsRequest(Collections.singletonList(colStats));
     db.setPartitionColumnStatistics(request);
     return 0;
   }
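
The flow added to persistColumnStats() above reduces to: use replicated statistics
verbatim when the work carries them, but skip the update entirely if the target table
became transactional during replication. An illustrative distillation (the helper and
its boolean parameters are not part of the patch):

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
    import org.apache.hadoop.hive.ql.metadata.Hive;

    class StatsPersistSketch {
      static int persist(Hive db, ColumnStatistics colStats,
                         boolean fromReplication,
                         boolean targetIsTransactional) throws Exception {
        if (fromReplication && targetIsTransactional) {
          return 0; // stats replication for ACID tables is not supported yet
        }
        db.setPartitionColumnStatistics(
            new SetPartitionsStatsRequest(Collections.singletonList(colStats)));
        return 0;
      }
    }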

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index dfa7e5e..cb7fdf7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4724,6 +4724,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     if (crtTbl.getReplaceMode()) {
       ReplicationSpec replicationSpec = crtTbl.getReplicationSpec();
       long writeId = 0;
+      EnvironmentContext environmentContext = null;
       if (replicationSpec != null && replicationSpec.isInReplicationScope()) {
         if (replicationSpec.isMigratingToTxnTable()) {
           // for migration we start the transaction and allocate write id in repl txn task for migration.
@@ -4735,11 +4736,19 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         } else {
           writeId = crtTbl.getReplWriteId();
         }
+
+        // In replication scope, statistics are obtained from the source, so they must not be
+        // updated on the replica. Since we are not replicating statistics for transactional
+        // tables yet, skip this for transactional tables.
+        if (!AcidUtils.isTransactionalTable(crtTbl)) {
+          environmentContext = new EnvironmentContext();
+          environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+        }
       }
 
       // replace-mode creates are really alters using CreateTableDesc.
-      db.alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false, null,
-              true, writeId);
+      db.alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false,
+              environmentContext, true, writeId);
     } else {
       if ((foreignKeys != null && foreignKeys.size() > 0) ||
           (primaryKeys != null && primaryKeys.size() > 0) ||
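
The EnvironmentContext set up above is how the metastore is told to leave statistics
alone during the replace-mode alter: replicated (non-ACID) tables carry their stats
from the source. A minimal sketch of just that part (the factory method name is
illustrative):

    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    class ReplStatsContextSketch {
      // Returns a context telling alterTable not to recompute stats, so the
      // statistics replicated from the source survive the alter on the replica.
      static EnvironmentContext keepReplicatedStats() {
        EnvironmentContext ctx = new EnvironmentContext();
        ctx.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
        return ctx;
      }
    }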

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index ca4391f..fb35c79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -398,8 +398,24 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
             Utilities.FILE_OP_LOGGER.trace("loadTable called from " + tbd.getSourcePath()
               + " into " + tbd.getTable().getTableName());
           }
+
+          boolean resetStatistics;
+          if (hasFollowingStatsTask()) {
+            // If there's a follow-on stats task then the stats will be correct after load, so
+            // there's no need to reset the statistics.
+            resetStatistics = false;
+          } else if (!work.getIsInReplicationScope()) {
+            // If the load is not happening during replication and there is no follow-on stats
+            // task, stats will be inaccurate after load and so need to be reset.
+            resetStatistics = true;
+          } else {
+            // If we are loading a table during replication, the stats will also be replicated
+            // and hence accurate if it's a non-transactional table. For transactional tables we
+            // do not replicate stats yet.
+            resetStatistics = AcidUtils.isTransactionalTable(table.getParameters());
+          }
           db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getLoadFileType(),
-              work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp, hasFollowingStatsTask(),
+              work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp, resetStatistics,
               tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite());
           if (work.getOutputs() != null) {
             DDLTask.addIfAbsentByName(new WriteEntity(table,
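
The three-way choice above collapses into a single predicate; restating it as one
(illustrative) helper makes the truth table explicit:

    class ResetStatsSketch {
      // resetStatistics for loadTable, as introduced above:
      //   follow-on stats task      -> false (the stats task recomputes them)
      //   plain load, no repl scope -> true  (stats are stale after the move)
      //   replication load          -> true only for transactional tables,
      //                                whose stats are not replicated yet
      static boolean shouldResetStatistics(boolean hasFollowingStatsTask,
                                           boolean inReplicationScope,
                                           boolean isTransactionalTable) {
        if (hasFollowingStatsTask) {
          return false;
        }
        return !inReplicationScope || isTransactionalTable;
      }
    }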

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index a5b944b..947bfcf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -257,7 +257,8 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
           LOG.debug(
               "analyzeReplDump dumping table: " + tblName + " to db root " + dbRoot.toUri());
           try {
-            HiveWrapper.Tuple<Table> tableTuple = new HiveWrapper(hiveDb, dbName).table(tblName);
+            HiveWrapper.Tuple<Table> tableTuple = new HiveWrapper(hiveDb, dbName).table(tblName,
+                                                                                        conf);
             boolean shouldWriteExternalTableLocationInfo =
                 conf.getBoolVar(HiveConf.ConfVars.REPL_INCLUDE_EXTERNAL_TABLES)
                 && TableType.EXTERNAL_TABLE.equals(tableTuple.object.getTableType())
@@ -335,6 +336,9 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
       // added/modified by concurrent txns which are later than current txn. So, need to set last repl Id of this table
       // as bootstrap dump's last repl Id.
       tuple.replicationSpec.setCurrentReplicationState(String.valueOf(lastReplId));
+
+      // For now we do not replicate stats for ACID tables, so wipe out column stats, if any.
+      tableSpec.tableHandle.getTTable().unsetColStats();
     }
     MmContext mmCtx = MmContext.createIfNeeded(tableSpec.tableHandle);
     new TableExport(

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
index 599eb04..d57cbd1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
@@ -97,6 +97,11 @@ public class FSTableEvent implements TableEvent {
         // If the conversion is from non transactional to transactional table
         if (AcidUtils.isTransactionalTable(table)) {
           replicationSpec().setMigratingToTxnTable();
+          // There won't be any writeId associated with statistics on a source non-transactional
+          // table. We would need to associate a cooked-up writeId on the target for those, but
+          // that's not done yet. Until then we don't replicate statistics for ACID tables even
+          // if they are available on the source.
+          table.getTTable().unsetColStats();
         }
         if (TableType.EXTERNAL_TABLE.equals(table.getTableType())) {
           // since we have converted to an external table now after applying the migration rules the

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index e0f0979..0d1a88c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -319,6 +319,7 @@ public class LoadTable {
       );
       moveWork.setLoadTableWork(loadTableWork);
     }
+    moveWork.setIsInReplicationScope(replicationSpec.isInReplicationScope());
     Task<?> loadTableTask = TaskFactory.get(moveWork, context.hiveConf);
     copyTask.addDependentTask(loadTableTask);
     return copyTask;

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c017790..cd59efb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1241,8 +1241,30 @@ public class Hive {
    * @return the table or if throwException is false a null value.
    * @throws HiveException
    */
-  public Table getTable(final String dbName, final String tableName,
-      boolean throwException, boolean checkTransactional) throws HiveException {
+  public Table getTable(final String dbName, final String tableName, boolean throwException,
+                        boolean checkTransactional) throws HiveException {
+    return getTable(dbName, tableName, throwException, checkTransactional, false);
+  }
+
+  /**
+   * Returns metadata of the table.
+   *
+   * @param dbName
+   *          the name of the database
+   * @param tableName
+   *          the name of the table
+   * @param throwException
+   *          controls whether an exception is thrown or a null value is returned
+   * @param checkTransactional
+   *          checks whether the table's metadata stats are valid (i.e. compliant
+   *          with the snapshot isolation) for the current transaction.
+   * @param getColumnStats
+   *          get column statistics if available
+   * @return the table or if throwException is false a null value.
+   * @throws HiveException
+   */
+  public Table getTable(final String dbName, final String tableName, boolean throwException,
+                        boolean checkTransactional, boolean getColumnStats) throws HiveException {
 
     if (tableName == null || tableName.equals("")) {
       throw new HiveException("empty table creation??");
@@ -1261,9 +1283,9 @@ public class Hive {
               dbName, tableName);
         }
         tTable = getMSC().getTable(getDefaultCatalog(conf), dbName, tableName,
-            validWriteIdList != null ? validWriteIdList.toString() : null);
+            validWriteIdList != null ? validWriteIdList.toString() : null, getColumnStats);
       } else {
-        tTable = getMSC().getTable(dbName, tableName);
+        tTable = getMSC().getTable(dbName, tableName, getColumnStats);
       }
     } catch (NoSuchObjectException e) {
       if (throwException) {
@@ -2755,14 +2777,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
    *          If the source directory is LOCAL
    * @param isSkewedStoreAsSubdir
    *          if list bucketing enabled
-   * @param hasFollowingStatsTask
-   *          if there is any following stats task
    * @param isAcidIUDoperation true if this is an ACID based Insert [overwrite]/update/delete
+   * @param resetStatistics should reset statistics as part of move.
    * @param writeId write ID allocated for the current load operation
    * @param stmtId statement ID of the current load statement
    */
   public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType, boolean isSrcLocal,
-      boolean isSkewedStoreAsSubdir, boolean isAcidIUDoperation, boolean hasFollowingStatsTask,
+      boolean isSkewedStoreAsSubdir, boolean isAcidIUDoperation, boolean resetStatistics,
       Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
@@ -2835,11 +2856,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
       perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
     }
     if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      LOG.debug("setting table statistics false for " + tbl.getDbName() + "." + tbl.getTableName());
       StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
     }
 
     //column stats will be inaccurate
-    if (!hasFollowingStatsTask) {
+    if (resetStatistics) {
+      LOG.debug("Clearing table statistics for " + tbl.getDbName() + "." + tbl.getTableName());
       StatsSetupConst.clearColumnStatsState(tbl.getParameters());
     }
 
@@ -2858,7 +2881,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
 
     EnvironmentContext environmentContext = null;
-    if (hasFollowingStatsTask) {
+    if (!resetStatistics) {
       environmentContext = new EnvironmentContext();
       environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }
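
A short usage sketch of the new overload (database and table names are placeholders):

    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    class GetTableWithStatsSketch {
      static Table fetch() throws HiveException {
        // The trailing 'true' is the new getColumnStats flag: the returned
        // table carries its column statistics when they are available.
        return Hive.get().getTable("some_db", "some_table",
            true /* throwException */,
            false /* checkTransactional */,
            true /* getColumnStats */);
      }
    }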

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 322b580..83cb3ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -174,13 +174,20 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   @Override
   public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name) throws MetaException,
   TException, NoSuchObjectException {
+    return getTable(dbname, name, false);
+  }
+
+  @Override
+  public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name,
+                                                             boolean getColStats) throws MetaException,
+  TException, NoSuchObjectException {
     // First check temp tables
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name);
     if (table != null) {
       return deepCopy(table);  // Original method used deepCopy(), do the same here.
     }
     // Try underlying client
-    return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name);
+    return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name, getColStats);
   }
 
   // Need to override this one too or dropTable breaks because it doesn't find the table when checks
@@ -188,10 +195,19 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   @Override
   public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName,
                                                              String tableName) throws TException {
+    return getTable(catName, dbName, tableName, false);
+  }
+
+  // Need to override this one too or dropTable breaks because it doesn't find the table when checks
+  // before the drop.
+  @Override
+  public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName,
+                                                             String tableName, boolean getColStats)
+          throws TException {
     if (!DEFAULT_CATALOG_NAME.equals(catName)) {
-      return super.getTable(catName, dbName, tableName);
+      return super.getTable(catName, dbName, tableName, getColStats);
     } else {
-      return getTable(dbName, tableName);
+      return getTable(dbName, tableName, getColStats);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 89b2db3..cd483eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -36,11 +36,13 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -53,6 +55,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
@@ -1106,4 +1109,28 @@ public class Table implements Serializable {
   public Boolean isOutdatedForRewriting() {
     return outdatedForRewritingMaterializedView;
   }
+
+  public ColumnStatistics getColStats() {
+    return tTable.isSetColStats() ? tTable.getColStats() : null;
+  }
+
+  /**
+   * Set up the table-level stats as if the table were new. Used when setting up a Table for a
+   * new table or during replication.
+   */
+  public void setStatsStateLikeNewTable() {
+    // We do not replicate statistics for an ACID table yet, so leave its stats untouched here.
+    if (AcidUtils.isTransactionalTable(this)) {
+      return;
+    }
+
+    if (isPartitioned()) {
+      StatsSetupConst.setStatsStateForCreateTable(getParameters(), null,
+              StatsSetupConst.FALSE);
+    } else {
+      StatsSetupConst.setStatsStateForCreateTable(getParameters(),
+              MetaStoreUtils.getColumnNames(getCols()), StatsSetupConst.TRUE);
+    }
+  }
 };

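A hedged sketch of what setStatsStateLikeNewTable() amounts to for a non-partitioned,
non-transactional table. StatsSetupConst and setStatsStateForCreateTable() are the
helpers used above; the parameter map and column names here are illustrative:

    // Mark basic stats and the per-column stats of "key" and "value" as accurate,
    // the state a freshly created, empty table would have.
    java.util.Map<String, String> params = new java.util.HashMap<>();
    org.apache.hadoop.hive.common.StatsSetupConst.setStatsStateForCreateTable(
        params,
        java.util.Arrays.asList("key", "value"),
        org.apache.hadoop.hive.common.StatsSetupConst.TRUE);
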
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index a843987..6102339 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -284,6 +284,11 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         //if the conversion is from non transactional to transactional table
         if (TxnUtils.isTransactionalTable(tblObj)) {
           replicationSpec.setMigratingToTxnTable();
+          // Statistics on the source non-transactional table have no writeId associated with
+          // them. On the target we would have to associate a synthetic writeId with them, which
+          // is not implemented yet. Until then, don't replicate statistics for a table being
+          // migrated to ACID, even if they are available on the source.
+          tblObj.unsetColStats();
         }
         tblDesc = getBaseCreateTableDescFromTable(dbname, tblObj);
         if (TableType.valueOf(tblObj.getTableType()) == TableType.EXTERNAL_TABLE) {
@@ -302,7 +307,11 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     boolean inReplicationScope = false;
     if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){
       tblDesc.setReplicationSpec(replicationSpec);
-      StatsSetupConst.setBasicStatsState(tblDesc.getTblProps(), StatsSetupConst.FALSE);
+      // Statistics for a non-transactional table are replicated separately, so leave them
+      // alone here and only invalidate the basic stats of a transactional table.
+      if (TxnUtils.isTransactionalTable(tblDesc.getTblProps())) {
+        StatsSetupConst.setBasicStatsState(tblDesc.getTblProps(), StatsSetupConst.FALSE);
+      }
       inReplicationScope = true;
       tblDesc.setReplWriteId(writeId);
     }

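The branch above only invalidates basic stats when the table being imported is
transactional; non-transactional tables get their statistics through the separate
replication path. The same gate in isolation, with an illustrative property map:

    java.util.Map<String, String> props = new java.util.HashMap<>();
    props.put("transactional", "true");  // illustrative table property
    if (org.apache.hadoop.hive.metastore.txn.TxnUtils.isTransactionalTable(props)) {
      // Stats can't be trusted on the target yet; mark basic stats inaccurate.
      org.apache.hadoop.hive.common.StatsSetupConst.setBasicStatsState(
          props, org.apache.hadoop.hive.common.StatsSetupConst.FALSE);
    }
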
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
index fb8c4ca..2fa3676 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.parse.repl.dump;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -54,8 +55,11 @@ public class HiveWrapper {
     return new Tuple<>(functionForSpec, () -> db.getDatabase(dbName));
   }
 
-  public Tuple<Table> table(final String tableName) throws HiveException {
-    return new Tuple<>(functionForSpec, () -> db.getTable(dbName, tableName));
+  public Tuple<Table> table(final String tableName, HiveConf conf) throws HiveException {
+    // Column statistics won't be accurate if we are dumping only metadata, so don't fetch them.
+    boolean getColStats = !conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY);
+    return new Tuple<>(functionForSpec, () -> db.getTable(dbName, tableName, true, false,
+            getColStats));
   }
 
   public static class Tuple<T> {

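For context, the gate consulted here is a plain HiveConf boolean; a condensed sketch
of the decision made above:

    // A metadata-only dump copies no data, so column statistics fetched with the
    // table would describe rows that never reach the replica.
    org.apache.hadoop.hive.conf.HiveConf conf = new org.apache.hadoop.hive.conf.HiveConf();
    boolean getColStats = !conf.getBoolVar(
        org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY);
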
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index b60be88..adc9446 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -72,9 +72,11 @@ public class TableExport {
         ? null
         : tableSpec;
     this.replicationSpec = replicationSpec;
-    if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) || (this.tableSpec != null
-        && this.tableSpec.tableHandle.isView())) {
+    if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) ||
+            (this.tableSpec != null && this.tableSpec.tableHandle.isView())) {
       this.replicationSpec.setIsMetadataOnly(true);
+
+      // tableSpec may be null for a database-level metadata-only dump; guard before
+      // resetting the stats state to that of an empty table.
+      if (this.tableSpec != null) {
+        this.tableSpec.tableHandle.setStatsStateLikeNewTable();
+      }
     }
     this.db = db;
     this.distCpDoAsUser = distCpDoAsUser;

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
index 00fa370..ff43399 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -94,6 +95,14 @@ class AlterTableHandler extends AbstractEventHandler<AlterTableMessage> {
       withinContext.replicationSpec.setIsMetadataOnly(true);
       Table qlMdTableAfter = new Table(after);
       Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+
+      // If we are dumping only the metadata of a table, and not its data, the basic statistics
+      // won't be accurate, so we shouldn't dump them either. Reset them to what they would look
+      // like for an empty table.
+      if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+        qlMdTableAfter.setStatsStateLikeNewTable();
+      }
+
       EximUtil.createExportDump(
           metaDataPath.getFileSystem(withinContext.hiveConf),
           metaDataPath,

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
index 5870876..a8bf671 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -61,6 +62,13 @@ class CreateTableHandler extends AbstractEventHandler<CreateTableMessage> {
       withinContext.replicationSpec.setIsMetadataOnly(true);
     }
 
+    // If we are not dumping the data of a table, we shouldn't be dumping its basic statistics
+    // either, since they won't be accurate. Reset them to what they would look like for an
+    // empty table.
+    if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+      qlMdTable.setStatsStateLikeNewTable();
+    }
+
     Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
     EximUtil.createExportDump(
         metaDataPath.getFileSystem(withinContext.hiveConf),

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
index a3aecde..e50a2bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
@@ -19,14 +19,15 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.UpdateTableColumnStatMessage;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 class UpdateTableColStatHandler extends AbstractEventHandler<UpdateTableColumnStatMessage> {
 
   UpdateTableColStatHandler(NotificationEvent event) {
     super(event);
   }
 
   @Override
   UpdateTableColumnStatMessage eventMessage(String stringRepresentation) {
@@ -35,6 +36,20 @@ class UpdateTableColStatHandler extends AbstractEventHandler<UpdateTableColumnSt
 
   @Override
   public void handle(Context withinContext) throws Exception {
+    Table qlMdTable = new Table(eventMessage.getTableObject());
+    if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, withinContext.hiveConf)) {
+      return;
+    }
+
+    // Statistics without the underlying data don't make sense.
+    if (withinContext.replicationSpec.isMetadataOnly()) {
+      return;
+    }
+    // For now we do not replicate the statistics for transactional tables.
+    if (AcidUtils.isTransactionalTable(qlMdTable)) {
+      return;
+    }
+
     LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON);
     DumpMetaData dmd = withinContext.createDmd(this);
     dmd.setPayload(eventMessageAsJSON);

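Condensed, the handler now drops the event in three cases before writing the dump
metadata. A behavioral summary, not the committed code, assuming the handler's local
variables are in scope:

    // Skip the event when (a) the table isn't replicated at all, (b) the dump is
    // metadata-only, or (c) the table is transactional (stats not replicated yet).
    boolean skip = !Utils.shouldReplicate(replicationSpec, qlMdTable, hiveConf)
        || replicationSpec.isMetadataOnly()
        || AcidUtils.isTransactionalTable(qlMdTable);
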
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
index eb3d18a..9a60de4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.messaging.UpdateTableColumnStatMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
+import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
 
 import java.io.Serializable;
 import java.util.Collections;
@@ -31,13 +34,29 @@ import java.util.List;
  * Target(Load) side handler for table stat update event
  */
 public class UpdateTableColStatHandler extends AbstractMessageHandler {
   @Override
   public List<Task<? extends Serializable>> handle(Context context)
       throws SemanticException {
-    context.log.info("Replication of table stat update event is not supported yet");
+    UpdateTableColumnStatMessage utcsm =
+        deserializer.getUpdateTableColumnStatMessage(context.dmd.getPayload());
+
+    // Update the table name and database name in the statistics object.
+    ColumnStatistics colStats = utcsm.getColumnStatistics();
+    ColumnStatisticsDesc colStatsDesc = colStats.getStatsDesc();
+    colStatsDesc.setDbName(context.dbName);
+    if (!context.isTableNameEmpty()) {
+      colStatsDesc.setTableName(context.tableName);
+    }
     if (!context.isDbNameEmpty()) {
-      updatedMetadata.set(context.dmd.getEventTo().toString(), context.dbName, context.tableName, null);
+      updatedMetadata.set(context.dmd.getEventTo().toString(), context.dbName,
+          context.tableName, null);
     }
-    return Collections.singletonList(TaskFactory.get(new DependencyCollectionWork(), context.hiveConf));
+
+    // TODO: For a transactional-stats update, ColumnStatsUpdateTask.execute() ->
+    // Hive.setPartitionColumnStatistics expects a valid writeId allocated by the current
+    // txn, and a table snapshot. Neither is available when the update comes from
+    // ReplLoadTask, which doesn't allocate a writeId. This needs further investigation.
+    return Collections.singletonList(TaskFactory.get(new ColumnStatsUpdateWork(colStats),
+        context.hiveConf));
   }
 }

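On the load side the replicated statistics are wrapped into a task directly. A
minimal sketch of that shape, assuming table-level stats with an empty object list
(the database and table names are illustrative):

    org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc desc =
        new org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc(
            true /* isTblLevel */, "repl_db", "repl_table");
    org.apache.hadoop.hive.metastore.api.ColumnStatistics colStats =
        new org.apache.hadoop.hive.metastore.api.ColumnStatistics(
            desc, java.util.Collections.emptyList());
    // The new single-argument constructor carries the whole statistics object.
    org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork work =
        new org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork(colStats);
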
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
index 6de1a37..1219b62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.plan;
 import java.io.Serializable;
 import java.util.Map;
 
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
@@ -42,6 +43,7 @@ public class ColumnStatsUpdateWork implements Serializable, DDLDescWithWriteId {
   private final String tableName;
   private final String colName;
   private final String colType;
+  private final ColumnStatistics colStats;
   private long writeId;
 
   public ColumnStatsUpdateWork(String partName,
@@ -56,6 +58,17 @@ public class ColumnStatsUpdateWork implements Serializable, DDLDescWithWriteId {
     this.tableName = tableName;
     this.colName = colName;
     this.colType = colType;
+    this.colStats = null;
+  }
+
+  public ColumnStatsUpdateWork(ColumnStatistics colStats) {
+    this.colStats = colStats;
+    this.partName = null;
+    this.mapProp = null;
+    this.dbName = null;
+    this.tableName = null;
+    this.colName = null;
+    this.colType = null;
   }
 
   @Override
@@ -87,6 +100,8 @@ public class ColumnStatsUpdateWork implements Serializable, DDLDescWithWriteId {
     return colType;
   }
 
+  public ColumnStatistics getColStats() { return colStats; }
+
   @Override
   public void setWriteId(long writeId) {
     this.writeId = writeId;

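The work descriptor now has two mutually exclusive shapes: the legacy per-column
fields, or a whole ColumnStatistics object coming from replication. A hypothetical
consumer-side dispatch (ColumnStatsUpdateTask's actual logic is outside this diff):

    if (work.getColStats() != null) {
      // Replication path: a fully formed ColumnStatistics object whose db and
      // table names were already rewritten for the target.
    } else {
      // DDL path: partName/dbName/tableName/colName/colType plus the property
      // map describe a single column statistic to update.
    }
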
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index f00148b..c71ff6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.PartitionManagementTask;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
@@ -106,6 +109,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
   List<SQLNotNullConstraint> notNullConstraints;
   List<SQLDefaultConstraint> defaultConstraints;
   List<SQLCheckConstraint> checkConstraints;
+  private ColumnStatistics colStats;
   private Long initialMmWriteId; // Initial MM write ID for CTAS and import.
   // The FSOP configuration for the FSOP that is going to write initial data during ctas.
   // This is not needed beyond compilation, so it is transient.
@@ -127,7 +131,8 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
       boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues,
       List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
       List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
-      List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints) {
+      List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints,
+      ColumnStatistics colStats) {
 
     this(tableName, isExternal, isTemporary, cols, partCols,
         bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape,
@@ -137,6 +142,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
         primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
 
     this.databaseName = databaseName;
+    this.colStats = colStats;
   }
 
   public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary,
@@ -157,7 +163,8 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
         collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat,
         outputFormat, location, serName, storageHandler, serdeProps,
         tblProps, ifNotExists, skewedColNames, skewedColValues,
-        primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+        primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints,
+        null);
     this.partColNames = partColNames;
     this.isCTAS = isCTAS;
   }
@@ -878,14 +885,29 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
       }
     }
 
-    if (!this.isCTAS && (tbl.getPath() == null || (tbl.isEmpty() && !isExternal()))) {
-      if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-        StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(),
-            MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
-      }
+    if (colStats != null) {
+      ColumnStatisticsDesc colStatsDesc = new ColumnStatisticsDesc(colStats.getStatsDesc());
+      colStatsDesc.setCatName(tbl.getCatName());
+      colStatsDesc.setTableName(getTableName());
+      colStatsDesc.setDbName(getDatabaseName());
+      tbl.getTTable().setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj()));
+    }
+
+    // The statistics for a non-transactional table are obtained from the source. Do not
+    // reset them on the replica.
+    if (replicationSpec != null && replicationSpec.isInReplicationScope() &&
+        !TxnUtils.isTransactionalTable(tbl.getTTable())) {
+      // Keep the replicated statistics untouched.
     } else {
-      StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null,
-          StatsSetupConst.FALSE);
+      if (!this.isCTAS && (tbl.getPath() == null || (tbl.isEmpty() && !isExternal()))) {
+        if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+          StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(),
+                  MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
+        }
+      } else {
+        StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null,
+                StatsSetupConst.FALSE);
+      }
     }
     return tbl;
   }

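Condensed, the new stats decision in toTable() is: keep the replicated statistics for
a non-transactional table in replication scope, otherwise fall back to the pre-patch
new-table initialization. As a predicate (a behavioral sketch, not the committed code):

    boolean keepReplicatedStats = replicationSpec != null
        && replicationSpec.isInReplicationScope()
        && !TxnUtils.isTransactionalTable(tbl.getTTable());
    if (!keepReplicatedStats) {
      // pre-patch behavior: initialize the stats state for a newly created table
    }
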
http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index 50b43ba..5c30fca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -84,7 +84,8 @@ public class ImportTableDesc {
                 null,
                 null,
             null,
-            null);
+            null,
+                table.getColStats());
         this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories());
         break;
       case VIEW:

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
index 47a56d5..8ca8e46 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
@@ -56,6 +56,7 @@ public class MoveWork implements Serializable {
    */
   protected List<Partition> movedParts;
   private boolean isNoop;
+  private boolean isInReplicationScope = false;
 
   public MoveWork() {
   }
@@ -164,4 +165,12 @@ public class MoveWork implements Serializable {
   public void setNeedCleanTarget(boolean needCleanTarget) {
     this.needCleanTarget = needCleanTarget;
   }
+
+  public void setIsInReplicationScope(boolean isInReplicationScope) {
+    this.isInReplicationScope = isInReplicationScope;
+  }
+
+  public boolean getIsInReplicationScope() {
+    return this.isInReplicationScope;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index e108684..78f2585 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -142,7 +142,7 @@ public class TestExecDriver extends TestCase {
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
         db.loadTable(hadoopDataFile[i], src, LoadFileType.KEEP_EXISTING,
-           true, false, false, false, null, 0, false);
+           true, false, false, true, null, 0, false);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2ffca04a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
index 2804952..3b60695 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
   private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField GET_COLUMN_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("getColumnStats", org.apache.thrift.protocol.TType.BOOL, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,6 +56,7 @@ import org.slf4j.LoggerFactory;
   private ClientCapabilities capabilities; // optional
   private String catName; // optional
   private String validWriteIdList; // optional
+  private boolean getColumnStats; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -62,7 +64,8 @@ import org.slf4j.LoggerFactory;
     TBL_NAME((short)2, "tblName"),
     CAPABILITIES((short)3, "capabilities"),
     CAT_NAME((short)4, "catName"),
-    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+    VALID_WRITE_ID_LIST((short)6, "validWriteIdList"),
+    GET_COLUMN_STATS((short)7, "getColumnStats");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -87,6 +90,8 @@ import org.slf4j.LoggerFactory;
           return CAT_NAME;
         case 6: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
+        case 7: // GET_COLUMN_STATS
+          return GET_COLUMN_STATS;
         default:
           return null;
       }
@@ -127,7 +132,9 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
+  private static final int __GETCOLUMNSTATS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.GET_COLUMN_STATS};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -141,6 +148,8 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.GET_COLUMN_STATS, new org.apache.thrift.meta_data.FieldMetaData("getColumnStats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap);
   }
@@ -161,6 +170,7 @@ import org.slf4j.LoggerFactory;
    * Performs a deep copy on <i>other</i>.
    */
   public GetTableRequest(GetTableRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetDbName()) {
       this.dbName = other.dbName;
     }
@@ -176,6 +186,7 @@ import org.slf4j.LoggerFactory;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
     }
+    this.getColumnStats = other.getColumnStats;
   }
 
   public GetTableRequest deepCopy() {
@@ -189,6 +200,8 @@ import org.slf4j.LoggerFactory;
     this.capabilities = null;
     this.catName = null;
     this.validWriteIdList = null;
+    setGetColumnStatsIsSet(false);
+    this.getColumnStats = false;
   }
 
   public String getDbName() {
@@ -306,6 +319,28 @@ import org.slf4j.LoggerFactory;
     }
   }
 
+  public boolean isGetColumnStats() {
+    return this.getColumnStats;
+  }
+
+  public void setGetColumnStats(boolean getColumnStats) {
+    this.getColumnStats = getColumnStats;
+    setGetColumnStatsIsSet(true);
+  }
+
+  public void unsetGetColumnStats() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GETCOLUMNSTATS_ISSET_ID);
+  }
+
+  /** Returns true if field getColumnStats is set (has been assigned a value) and false otherwise */
+  public boolean isSetGetColumnStats() {
+    return EncodingUtils.testBit(__isset_bitfield, __GETCOLUMNSTATS_ISSET_ID);
+  }
+
+  public void setGetColumnStatsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GETCOLUMNSTATS_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case DB_NAME:
@@ -348,6 +383,14 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case GET_COLUMN_STATS:
+      if (value == null) {
+        unsetGetColumnStats();
+      } else {
+        setGetColumnStats((Boolean)value);
+      }
+      break;
+
     }
   }
 
@@ -368,6 +411,9 @@ import org.slf4j.LoggerFactory;
     case VALID_WRITE_ID_LIST:
       return getValidWriteIdList();
 
+    case GET_COLUMN_STATS:
+      return isGetColumnStats();
+
     }
     throw new IllegalStateException();
   }
@@ -389,6 +435,8 @@ import org.slf4j.LoggerFactory;
       return isSetCatName();
     case VALID_WRITE_ID_LIST:
       return isSetValidWriteIdList();
+    case GET_COLUMN_STATS:
+      return isSetGetColumnStats();
     }
     throw new IllegalStateException();
   }
@@ -451,6 +499,15 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_getColumnStats = true && this.isSetGetColumnStats();
+    boolean that_present_getColumnStats = true && that.isSetGetColumnStats();
+    if (this_present_getColumnStats || that_present_getColumnStats) {
+      if (!(this_present_getColumnStats && that_present_getColumnStats))
+        return false;
+      if (this.getColumnStats != that.getColumnStats)
+        return false;
+    }
+
     return true;
   }
 
@@ -483,6 +540,11 @@ import org.slf4j.LoggerFactory;
     if (present_validWriteIdList)
       list.add(validWriteIdList);
 
+    boolean present_getColumnStats = true && (isSetGetColumnStats());
+    list.add(present_getColumnStats);
+    if (present_getColumnStats)
+      list.add(getColumnStats);
+
     return list.hashCode();
   }
 
@@ -544,6 +606,16 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetGetColumnStats()).compareTo(other.isSetGetColumnStats());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGetColumnStats()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.getColumnStats, other.getColumnStats);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -609,6 +681,12 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
+    if (isSetGetColumnStats()) {
+      if (!first) sb.append(", ");
+      sb.append("getColumnStats:");
+      sb.append(this.getColumnStats);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -639,6 +717,8 @@ import org.slf4j.LoggerFactory;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -704,6 +784,14 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 7: // GET_COLUMN_STATS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.getColumnStats = iprot.readBool();
+              struct.setGetColumnStatsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -748,6 +836,11 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetGetColumnStats()) {
+        oprot.writeFieldBegin(GET_COLUMN_STATS_FIELD_DESC);
+        oprot.writeBool(struct.getColumnStats);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -777,7 +870,10 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetValidWriteIdList()) {
         optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetGetColumnStats()) {
+        optionals.set(3);
+      }
+      oprot.writeBitSet(optionals, 4);
       if (struct.isSetCapabilities()) {
         struct.capabilities.write(oprot);
       }
@@ -787,6 +883,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetValidWriteIdList()) {
         oprot.writeString(struct.validWriteIdList);
       }
+      if (struct.isSetGetColumnStats()) {
+        oprot.writeBool(struct.getColumnStats);
+      }
     }
 
     @Override
@@ -796,7 +895,7 @@ import org.slf4j.LoggerFactory;
       struct.setDbNameIsSet(true);
       struct.tblName = iprot.readString();
       struct.setTblNameIsSet(true);
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         struct.capabilities = new ClientCapabilities();
         struct.capabilities.read(iprot);
@@ -810,6 +909,10 @@ import org.slf4j.LoggerFactory;
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
+      if (incoming.get(3)) {
+        struct.getColumnStats = iprot.readBool();
+        struct.setGetColumnStatsIsSet(true);
+      }
     }
   }
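
Request-side usage of the regenerated struct; a minimal sketch with illustrative
database and table names:

    GetTableRequest req = new GetTableRequest("default", "src");
    req.setGetColumnStats(true);  // new optional field 7
    // Being optional, the field is skipped by servers that predate it; servers
    // that know it attach the table-level ColumnStatistics to the returned Table.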