Posted to commits@hive.apache.org by sa...@apache.org on 2019/01/29 14:46:51 UTC

[11/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)

HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)

Signed-off-by: Sankar Hariappan <sa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/71dfd1d1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/71dfd1d1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/71dfd1d1

Branch: refs/heads/master
Commit: 71dfd1d11f239caf8f16bc29db0f959e566f7659
Parents: 9747083
Author: Ashutosh Bapat <ab...@cloudera.com>
Authored: Tue Jan 29 20:15:59 2019 +0530
Committer: Sankar Hariappan <sa...@apache.org>
Committed: Tue Jan 29 20:15:59 2019 +0530

----------------------------------------------------------------------
 .../listener/DbNotificationListener.java        |    1 +
 .../hive/ql/parse/TestReplicationScenarios.java |    5 +-
 .../ql/parse/TestStatsReplicationScenarios.java |  105 +-
 ...stStatsReplicationScenariosNoAutogather.java |    2 -
 .../hadoop/hive/ql/parse/WarehouseInstance.java |   14 +
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   54 +-
 .../events/filesystem/FSTableEvent.java         |   13 +
 .../bootstrap/load/table/LoadPartitions.java    |    1 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   92 +-
 .../hive/ql/metadata/PartitionIterable.java     |   16 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java |   15 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   36 +-
 .../hive/ql/parse/repl/dump/TableExport.java    |   11 +-
 .../repl/dump/events/AddPartitionHandler.java   |    8 +
 .../repl/dump/events/AlterPartitionHandler.java |    8 +
 .../repl/dump/events/DropPartitionHandler.java  |    9 +
 .../dump/events/UpdatePartColStatHandler.java   |   30 +-
 .../dump/events/UpdateTableColStatHandler.java  |    2 +-
 .../load/message/UpdatePartColStatHandler.java  |   22 +-
 .../hadoop/hive/ql/plan/AddPartitionDesc.java   |    6 +
 .../hive/metastore/api/AbortTxnsRequest.java    |   32 +-
 .../metastore/api/AddDynamicPartitions.java     |   32 +-
 .../api/AllocateTableWriteIdsRequest.java       |   68 +-
 .../api/AllocateTableWriteIdsResponse.java      |   36 +-
 .../metastore/api/AlterPartitionsRequest.java   |   36 +-
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/ClientCapabilities.java  |   32 +-
 .../hive/metastore/api/CommitTxnRequest.java    |   36 +-
 .../hive/metastore/api/CompactionRequest.java   |   44 +-
 .../hive/metastore/api/CreationMetadata.java    |   32 +-
 .../metastore/api/FindSchemasByColsResp.java    |   36 +-
 .../hive/metastore/api/FireEventRequest.java    |   32 +-
 .../hadoop/hive/metastore/api/Function.java     |   36 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java       |   32 +-
 .../api/GetFileMetadataByExprResult.java        |   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java    |   44 +-
 .../metastore/api/GetOpenTxnsInfoResponse.java  |   36 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java |   32 +-
 .../api/GetPartitionsByNamesRequest.java        |  752 ++++
 .../api/GetPartitionsByNamesResult.java         |  443 +++
 .../metastore/api/GetPartitionsFilterSpec.java  |   32 +-
 .../api/GetPartitionsProjectionSpec.java        |   32 +-
 .../metastore/api/GetPartitionsRequest.java     |   32 +-
 .../metastore/api/GetPartitionsResponse.java    |   36 +-
 .../hive/metastore/api/GetTablesRequest.java    |   32 +-
 .../hive/metastore/api/GetTablesResult.java     |   36 +-
 .../metastore/api/GetValidWriteIdsRequest.java  |   32 +-
 .../metastore/api/GetValidWriteIdsResponse.java |   36 +-
 .../api/HeartbeatTxnRangeResponse.java          |   64 +-
 .../metastore/api/InsertEventRequestData.java   |   96 +-
 .../hadoop/hive/metastore/api/LockRequest.java  |   36 +-
 .../metastore/api/NotificationEventRequest.java |   32 +-
 .../api/NotificationEventResponse.java          |   36 +-
 .../hive/metastore/api/OpenTxnRequest.java      |   32 +-
 .../hive/metastore/api/OpenTxnsResponse.java    |   32 +-
 .../hadoop/hive/metastore/api/Partition.java    |  116 +-
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../metastore/api/RenamePartitionRequest.java   |   32 +-
 .../hive/metastore/api/ReplLastIdInfo.java      |   32 +-
 .../api/ReplTblWriteIdStateRequest.java         |   32 +-
 .../hive/metastore/api/SchemaVersion.java       |   36 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ShowLocksResponse.java   |   36 +-
 .../hive/metastore/api/TableValidWriteIds.java  |   32 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 3601 ++++++++++++------
 .../hive/metastore/api/WMFullResourcePlan.java  |  144 +-
 .../api/WMGetAllResourcePlanResponse.java       |   36 +-
 .../WMGetTriggersForResourePlanResponse.java    |   36 +-
 .../api/WMValidateResourcePlanResponse.java     |   64 +-
 .../api/WriteNotificationLogRequest.java        |   32 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1660 ++++----
 .../src/gen/thrift/gen-php/metastore/Types.php  | 1421 ++++---
 .../hive_metastore/ThriftHiveMetastore-remote   |    7 +
 .../hive_metastore/ThriftHiveMetastore.py       | 1163 +++---
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  950 +++--
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   45 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   66 +
 .../hive/metastore/HiveMetaStoreClient.java     |   21 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |   30 +
 .../src/main/thrift/hive_metastore.thrift       |   16 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |  103 +-
 .../events/UpdatePartitionColumnStatEvent.java  |   12 +-
 .../metastore/messaging/MessageBuilder.java     |    4 +-
 .../UpdatePartitionColumnStatMessage.java       |    4 +
 .../JSONUpdatePartitionColumnStatMessage.java   |   13 +-
 .../HiveMetaStoreClientPreCatalog.java          |   21 +-
 .../apache/hadoop/hive/metastore/TestStats.java |   22 +
 89 files changed, 8405 insertions(+), 4467 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 81b35a4..8404e3e 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -788,6 +788,7 @@ public class DbNotificationListener extends TransactionalMetaStoreEventListener
             .buildUpdatePartitionColumnStatMessage(updatePartColStatEvent.getPartColStats(),
                     updatePartColStatEvent.getPartVals(),
                     updatePartColStatEvent.getPartParameters(),
+                    updatePartColStatEvent.getTableObj(),
                     updatePartColStatEvent.getValidWriteIds(), updatePartColStatEvent.getWriteId());
     NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_PARTITION_COLUMN_STAT.toString(),
                     msgEncoder.getSerializer().serialize(msg));

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 6e9c443..3820fab 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -423,10 +423,11 @@ public class TestReplicationScenarios {
     run("insert into table " + dbName + ".t2 partition(country='india') values ('delhi')", driver);
     dump = replDumpDb(dbName, dump.lastReplId, null, null);
 
-    //no partition task should be added as the operation is inserting into an existing partition
+    // Partition-level statistics get updated as part of the INSERT above, so we see a partition
+    // task corresponding to an ALTER_PARTITION event.
     task = getReplLoadRootTask(dbNameReplica, true, dump);
     assertEquals(true, hasMoveTask(task));
-    assertEquals(false, hasPartitionTask(task));
+    assertEquals(true, hasPartitionTask(task));
 
     loadAndVerify(dbNameReplica, dump.dumpLocation, dump.lastReplId);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
index 8815a13..1ec4498 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
@@ -20,9 +20,10 @@ package org.apache.hadoop.hive.ql.parse;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
-import org.apache.hadoop.hive.ql.parse.repl.PathBuilder;
 import org.apache.hadoop.hive.shims.Utils;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -112,7 +113,7 @@ public class TestStatsReplicationScenarios {
 
 
   private Map<String, String> collectStatsParams(Map<String, String> allParams) {
-    Map<String, String> statsParams = new HashMap<String, String>();
+    Map<String, String> statsParams = new HashMap<>();
     List<String> params = new ArrayList<>(StatsSetupConst.SUPPORTED_STATS);
     params.add(StatsSetupConst.COLUMN_STATS_ACCURATE);
     for (String param : params) {
@@ -125,7 +126,7 @@ public class TestStatsReplicationScenarios {
     return statsParams;
   }
 
-  private void verifyReplicatedStatsForTable(String tableName) throws Exception {
+  private void verifyReplicatedStatsForTable(String tableName) throws Throwable {
     // Test column stats
     Assert.assertEquals(primary.getTableColumnStatistics(primaryDbName, tableName),
                         replica.getTableColumnStatistics(replicatedDbName, tableName));
@@ -136,6 +137,32 @@ public class TestStatsReplicationScenarios {
     Map<String, String> pParams =
             collectStatsParams(primary.getTable(primaryDbName, tableName).getParameters());
     Assert.assertEquals(pParams, rParams);
+
+    verifyReplicatedStatsForPartitionsOfTable(tableName);
+  }
+
+  private void verifyReplicatedStatsForPartitionsOfTable(String tableName)
+          throws Throwable {
+    // Test partition level stats
+    List<Partition> pParts = primary.getAllPartitions(primaryDbName, tableName);
+
+    if (pParts == null || pParts.isEmpty()) {
+      // Not a partitioned table, nothing to verify.
+      return;
+    }
+
+    for (Partition pPart : pParts) {
+      Partition rPart = replica.getPartition(replicatedDbName, tableName,
+              pPart.getValues());
+
+      Map<String, String> rParams = collectStatsParams(rPart.getParameters());
+      Map<String, String> pParams = collectStatsParams(pPart.getParameters());
+      Assert.assertEquals(pParams, rParams);
+    }
+
+    // Test partition column stats for all partitions
+    Assert.assertEquals(primary.getAllPartitionColumnStatistics(primaryDbName, tableName),
+                        replica.getAllPartitionColumnStatistics(replicatedDbName, tableName));
   }
 
   private void verifyNoStatsReplicationForMetadataOnly(String tableName) throws Throwable {
@@ -147,7 +174,6 @@ public class TestStatsReplicationScenarios {
     // or false. Either is fine with us so don't bother checking exact values.
     Map<String, String> rParams =
             collectStatsParams(replica.getTable(replicatedDbName, tableName).getParameters());
-    List<String> params = new ArrayList<>(StatsSetupConst.SUPPORTED_STATS);
     Map<String, String> expectedFalseParams = new HashMap<>();
     Map<String, String> expectedTrueParams = new HashMap<>();
     StatsSetupConst.setStatsStateForCreateTable(expectedTrueParams,
@@ -155,24 +181,54 @@ public class TestStatsReplicationScenarios {
     StatsSetupConst.setStatsStateForCreateTable(expectedFalseParams,
             replica.getTableColNames(replicatedDbName, tableName), StatsSetupConst.FALSE);
     Assert.assertTrue(rParams.equals(expectedFalseParams) || rParams.equals(expectedTrueParams));
+
+    verifyNoPartitionStatsReplicationForMetadataOnly(tableName);
+  }
+
+  private void verifyNoPartitionStatsReplicationForMetadataOnly(String tableName) throws Throwable {
+    // Test partition level stats
+    List<Partition> pParts = primary.getAllPartitions(primaryDbName, tableName);
+
+    if (pParts == null || pParts.isEmpty()) {
+      // Not a partitioned table, nothing to verify.
+      return;
+    }
+
+    // Partitions are not replicated in metadata only replication.
+    List<Partition> rParts = replica.getAllPartitions(replicatedDbName, tableName);
+    Assert.assertTrue(rParts == null || rParts.isEmpty());
+
+    // Test partition column stats for all partitions
+    Map<String, List<ColumnStatisticsObj>> rPartColStats =
+            replica.getAllPartitionColumnStatistics(replicatedDbName, tableName);
+    for (Map.Entry<String, List<ColumnStatisticsObj>> entry: rPartColStats.entrySet()) {
+      List<ColumnStatisticsObj> colStats = entry.getValue();
+      Assert.assertTrue(colStats == null || colStats.isEmpty());
+    }
   }
 
   private List<String> createBootStrapData() throws Throwable {
+    // Unpartitioned table with data
     String simpleTableName = "sTable";
+    // Partitioned table with data
     String partTableName = "pTable";
+    // Unpartitioned table without data during bootstrap and hence no stats
     String ndTableName = "ndTable";
+    // Partitioned table without data during bootstrap and hence no stats.
+    String ndPartTableName = "ndPTable";
 
     primary.run("use " + primaryDbName)
             .run("create table " + simpleTableName + " (id int)")
             .run("insert into " + simpleTableName + " values (1), (2)")
             .run("create table " + partTableName + " (place string) partitioned by (country string)")
-            .run("insert into table " + partTableName + " partition(country='india') values ('bangalore')")
-            .run("insert into table " + partTableName + " partition(country='us') values ('austin')")
-            .run("insert into table " + partTableName + " partition(country='france') values ('paris')")
-            .run("create table " + ndTableName + " (str string)");
+            .run("insert into " + partTableName + " partition(country='india') values ('bangalore')")
+            .run("insert into " + partTableName + " partition(country='us') values ('austin')")
+            .run("insert into " + partTableName + " partition(country='france') values ('paris')")
+            .run("create table " + ndTableName + " (str string)")
+            .run("create table " + ndPartTableName + " (val string) partitioned by (pk int)");
 
-    List<String> tableNames = new ArrayList<String>(Arrays.asList(simpleTableName, partTableName,
-            ndTableName));
+    List<String> tableNames = new ArrayList<>(Arrays.asList(simpleTableName, partTableName,
+            ndTableName, ndPartTableName));
 
     // Run analyze on each of the tables, if they are not being gathered automatically.
     if (!hasAutogather) {
@@ -214,7 +270,7 @@ public class TestStatsReplicationScenarios {
             .dump(primaryDbName, lastReplicationId, withClauseList);
 
     // Load, if necessary changing configuration.
-    if (parallelLoad && lastReplicationId == null) {
+    if (parallelLoad) {
       replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true);
     }
 
@@ -246,27 +302,41 @@ public class TestStatsReplicationScenarios {
   }
 
   private void createIncrementalData(List<String> tableNames) throws Throwable {
+    // These tables serve the same purposes as in createBootStrapData(); see the comments there.
     String simpleTableName = "sTable";
     String partTableName = "pTable";
     String ndTableName = "ndTable";
+    String ndPartTableName = "ndPTable";
 
     Assert.assertTrue(tableNames.containsAll(Arrays.asList(simpleTableName, partTableName,
-                                                         ndTableName)));
+                                                         ndTableName, ndPartTableName)));
+    // New tables created during incremental phase and thus loaded with data and stats during
+    // incremental phase.
     String incTableName = "iTable"; // New table
+    String incPartTableName = "ipTable"; // New partitioned table
 
     primary.run("use " + primaryDbName)
             .run("insert into " + simpleTableName + " values (3), (4)")
             // new data inserted into table
             .run("insert into " + ndTableName + " values ('string1'), ('string2')")
             // two partitions changed and one unchanged
-            .run("insert into table " + partTableName + " values ('india', 'pune')")
-            .run("insert into table " + partTableName + " values ('us', 'chicago')")
+            .run("insert into " + partTableName + "(country, place) values ('india', 'pune')")
+            .run("insert into " + partTableName + "(country, place) values ('us', 'chicago')")
             // new partition
-            .run("insert into table " + partTableName + " values ('australia', 'perth')")
+            .run("insert into " + partTableName + "(country, place) values ('australia', 'perth')")
             .run("create table " + incTableName + " (config string, enabled boolean)")
             .run("insert into " + incTableName + " values ('conf1', true)")
-            .run("insert into " + incTableName + " values ('conf2', false)");
+            .run("insert into " + incTableName + " values ('conf2', false)")
+            .run("insert into " + ndPartTableName + "(pk, val) values (1, 'one')")
+            .run("insert into " + ndPartTableName + "(pk, val) values (1, 'another one')")
+            .run("insert into " + ndPartTableName + "(pk, val) values (2, 'two')")
+            .run("create table " + incPartTableName +
+                    "(val string) partitioned by (tvalue boolean)")
+            .run("insert into " + incPartTableName + "(tvalue, val) values (true, 'true')")
+            .run("insert into " + incPartTableName + "(tvalue, val) values (false, 'false')");
+
     tableNames.add(incTableName);
+    tableNames.add(incPartTableName);
 
     // Run analyze on each of the tables, if they are not being gathered automatically.
     if (!hasAutogather) {
@@ -275,10 +345,9 @@ public class TestStatsReplicationScenarios {
                 .run("analyze table " + name + " compute statistics for columns");
       }
     }
-
   }
 
-  public void testStatsReplicationCommon(boolean parallelBootstrap, boolean metadataOnly) throws Throwable {
+  private void testStatsReplicationCommon(boolean parallelBootstrap, boolean metadataOnly) throws Throwable {
     List<String> tableNames = createBootStrapData();
     String lastReplicationId = dumpLoadVerify(tableNames, null, parallelBootstrap,
             metadataOnly);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
index f58ddb8..51f8dfb 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenariosNoAutogather.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
@@ -26,7 +25,6 @@ import org.junit.Rule;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.junit.Assert;
 
 import java.util.HashMap;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index b272f06..e0547d4 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -404,6 +404,20 @@ public class WarehouseInstance implements Closeable {
   }
 
   /**
+   * Get statistics for all columns of all the partitions of a given table in the given
+   * database.
+   * @param dbName - the database where the table resides
+   * @param tableName - table whose partition column statistics are to be retrieved
+   * @return - lists of ColumnStatisticsObj objects for each partition, keyed by partition name
+   */
+  public Map<String, List<ColumnStatisticsObj>> getAllPartitionColumnStatistics(String dbName,
+                                                                    String tableName) throws Exception {
+    List<String> colNames = new ArrayList<>();
+    client.getFields(dbName, tableName).forEach(fs -> colNames.add(fs.getName()));
+    return getAllPartitionColumnStatistics(dbName, tableName, colNames);
+  }
+
+  /**
    * Get statistics for given set of columns for all the partitions of a given table in the given
    * database.
    * @param dbName - the database where the table resides
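
A minimal usage sketch (mirroring the new assertions in TestStatsReplicationScenarios; primary and
replica are assumed to be WarehouseInstance handles for the source and target warehouses):

  // Partition column statistics should match between source and replica after replication.
  Map<String, List<ColumnStatisticsObj>> srcStats =
          primary.getAllPartitionColumnStatistics(primaryDbName, tableName);
  Map<String, List<ColumnStatisticsObj>> replStats =
          replica.getAllPartitionColumnStatistics(replicatedDbName, tableName);
  Assert.assertEquals(srcStats, replStats);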

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index fb35c79..4f2a116 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -269,6 +269,26 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
     return false;
   }
 
+  // Whether statistics need to be reset as part of MoveTask execution.
+  private boolean resetStatisticsProps(Table table) {
+    if (hasFollowingStatsTask()) {
+      // If there's a follow-on stats task then the stats will be correct after load, so don't
+      // need to reset the statistics.
+      return false;
+    }
+
+    if (!work.getIsInReplicationScope()) {
+      // If the load is not happening during replication and there is no follow-on stats
+      // task, stats will be inaccurate after the load and so need to be reset.
+      return true;
+    }
+
+    // If we are loading a table during replication, the stats will also be replicated
+    // and hence accurate if it's a non-transactional table. For transactional tables we
+    // do not replicate stats yet, so they need to be reset.
+    return AcidUtils.isTransactionalTable(table.getParameters());
+  }
+
   private final static class TaskInformation {
     public List<BucketCol> bucketCols = null;
     public List<SortCol> sortCols = null;
@@ -399,24 +419,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
               + " into " + tbd.getTable().getTableName());
           }
 
-          boolean resetStatistics;
-          if (hasFollowingStatsTask()) {
-            // If there's a follow-on stats task then the stats will be correct after load, so don't
-            // need to reset the statistics.
-            resetStatistics = false;
-          } else if (!work.getIsInReplicationScope()) {
-            // If the load is not happening during replication and there is not follow-on stats
-            // task, stats will be inaccurate after load and so need to be reset.
-            resetStatistics = true;
-          } else {
-            // If we are loading a table during replication, the stats will also be replicated
-            // and hence accurate if it's a non-transactional table. For transactional table we
-            // do not replicate stats yet.
-            resetStatistics = AcidUtils.isTransactionalTable(table.getParameters());
-          }
           db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getLoadFileType(),
-              work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp, resetStatistics,
-              tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite());
+                  work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp,
+                  resetStatisticsProps(table), tbd.getWriteId(), tbd.getStmtId(),
+                  tbd.isInsertOverwrite());
           if (work.getOutputs() != null) {
             DDLTask.addIfAbsentByName(new WriteEntity(table,
               getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs());
@@ -513,12 +519,12 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
     }
 
     db.loadPartition(tbd.getSourcePath(), db.getTable(tbd.getTable().getTableName()),
-        tbd.getPartitionSpec(), tbd.getLoadFileType(), tbd.getInheritTableSpecs(),
-        tbd.getInheritLocation(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
-         work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
-            !tbd.isMmTable(),
-         hasFollowingStatsTask(),
-        tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite());
+            tbd.getPartitionSpec(), tbd.getLoadFileType(), tbd.getInheritTableSpecs(),
+            tbd.getInheritLocation(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
+            work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
+                    !tbd.isMmTable(),
+            resetStatisticsProps(table), tbd.getWriteId(), tbd.getStmtId(),
+            tbd.isInsertOverwrite());
     Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
 
     // See the comment inside updatePartitionBucketSortColumns.
@@ -563,7 +569,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
             !tbd.isMmTable(),
         work.getLoadTableWork().getWriteId(),
         tbd.getStmtId(),
-        hasFollowingStatsTask(),
+        resetStatisticsProps(table),
         work.getLoadTableWork().getWriteType(),
         tbd.isInsertOverwrite());
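
For readability, the decision made by resetStatisticsProps() above boils down to:

  hasFollowingStatsTask | isInReplicationScope | transactional | resetStatistics
  ----------------------+----------------------+---------------+--------------------------------------------
  true                  | any                  | any           | false (follow-on stats task updates them)
  false                 | false                | any           | true  (stats stale after load)
  false                 | true                 | no            | false (stats replicated from the source)
  false                 | true                 | yes           | true  (txn table stats not replicated yet)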
 

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
index d57cbd1..076165a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.TableEvent;
@@ -186,6 +188,17 @@ public class FSTableEvent implements TableEvent {
             Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
       }
       partsDesc.setReplicationSpec(replicationSpec());
+
+      // Right now, we do not have a way of associating a writeId with statistics for a table
+      // converted to a transactional table if it was non-transactional on the source. So, do not
+      // update statistics for converted tables even if available on the source.
+      if (partition.isSetColStats() && !replicationSpec().isMigratingToTxnTable()) {
+        ColumnStatistics colStats = partition.getColStats();
+        ColumnStatisticsDesc colStatsDesc = new ColumnStatisticsDesc(colStats.getStatsDesc());
+        colStatsDesc.setTableName(tblDesc.getTableName());
+        colStatsDesc.setDbName(tblDesc.getDatabaseName());
+        partDesc.setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj()));
+      }
       return partsDesc;
     } catch (Exception e) {
       throw new SemanticException(e);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index e182f31..ad41276 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -313,6 +313,7 @@ public class LoadPartitions {
       loadTableWork.setInheritTableSpecs(false);
       moveWork.setLoadTableWork(loadTableWork);
     }
+    moveWork.setIsInReplicationScope(event.replicationSpec().isInReplicationScope());
 
     return TaskFactory.get(moveWork, context.hiveConf);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index cd59efb..9ab3a9e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1895,18 +1895,20 @@ public class Hive {
    *          If the source directory is LOCAL
    * @param isAcidIUDoperation
    *          true if this is an ACID operation Insert/Update/Delete operation
-   * @param hasFollowingStatsTask
-   *          true if there is a following task which updates the stats, so, this method need not update.
+   * @param resetStatistics
+   *          if true, reset the statistics for the partition being loaded.
    * @param writeId write ID allocated for the current load operation
    * @param stmtId statement ID of the current load statement
    * @param isInsertOverwrite
    * @return Partition object being loaded with data
    */
   public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec,
-      LoadFileType loadFileType, boolean inheritTableSpecs, boolean inheritLocation,
-      boolean isSkewedStoreAsSubdir,
-      boolean isSrcLocal, boolean isAcidIUDoperation, boolean hasFollowingStatsTask, Long writeId,
-      int stmtId, boolean isInsertOverwrite) throws HiveException {
+                                 LoadFileType loadFileType, boolean inheritTableSpecs,
+                                 boolean inheritLocation,
+                                 boolean isSkewedStoreAsSubdir,
+                                 boolean isSrcLocal, boolean isAcidIUDoperation,
+                                 boolean resetStatistics, Long writeId,
+                                 int stmtId, boolean isInsertOverwrite) throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION);
@@ -1923,7 +1925,7 @@ public class Hive {
     Partition newTPart = loadPartitionInternal(loadPath, tbl, partSpec, oldPart,
             loadFileType, inheritTableSpecs,
             inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation,
-            hasFollowingStatsTask, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles);
+            resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles);
 
     AcidUtils.TableSnapshot tableSnapshot = null;
     if (isTxnTable) {
@@ -1941,7 +1943,7 @@ public class Hive {
     }
 
     if (oldPart == null) {
-      addPartitionToMetastore(newTPart, hasFollowingStatsTask, tbl, tableSnapshot);
+      addPartitionToMetastore(newTPart, resetStatistics, tbl, tableSnapshot);
       // For acid table, add the acid_write event with file list at the time of load itself. But
       // it should be done after partition is created.
       if (isTxnTable && (null != newFiles)) {
@@ -1949,7 +1951,7 @@ public class Hive {
       }
     } else {
       try {
-        setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot);
+        setStatsPropAndAlterPartition(resetStatistics, tbl, newTPart, tableSnapshot);
       } catch (TException e) {
         LOG.error(StringUtils.stringifyException(e));
         throw new HiveException(e);
@@ -1987,8 +1989,8 @@ public class Hive {
    *          If the source directory is LOCAL
    * @param isAcidIUDoperation
    *          true if this is an ACID operation Insert/Update/Delete operation
-   * @param hasFollowingStatsTask
-   *          true if there is a following task which updates the stats, so, this method need not update.
+   * @param resetStatistics
+   *          if true, reset the statistics for the partition being loaded.
    * @param writeId
    *          write ID allocated for the current load operation
    * @param stmtId
@@ -2002,7 +2004,7 @@ public class Hive {
   private Partition loadPartitionInternal(Path loadPath, Table tbl, Map<String, String> partSpec,
                         Partition oldPart, LoadFileType loadFileType, boolean inheritTableSpecs,
                         boolean inheritLocation, boolean isSkewedStoreAsSubdir,
-                        boolean isSrcLocal, boolean isAcidIUDoperation, boolean hasFollowingStatsTask,
+                        boolean isSrcLocal, boolean isAcidIUDoperation, boolean resetStatistics,
                         Long writeId, int stmtId, boolean isInsertOverwrite,
                         boolean isTxnTable, List<Path> newFiles) throws HiveException {
     Path tblDataLocationPath =  tbl.getDataLocation();
@@ -2120,7 +2122,7 @@ public class Hive {
       }
 
       // column stats will be inaccurate
-      if (!hasFollowingStatsTask) {
+      if (resetStatistics) {
         StatsSetupConst.clearColumnStatsState(newTPart.getParameters());
       }
 
@@ -2168,7 +2170,7 @@ public class Hive {
     }
   }
 
-  private void addPartitionToMetastore(Partition newTPart, boolean hasFollowingStatsTask,
+  private void addPartitionToMetastore(Partition newTPart, boolean resetStatistics,
                                        Table tbl, TableSnapshot tableSnapshot) throws HiveException{
     try {
       LOG.debug("Adding new partition " + newTPart.getSpec());
@@ -2187,7 +2189,7 @@ public class Hive {
       // In that case, we want to retry with alterPartition.
       LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
       try {
-        setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot);
+        setStatsPropAndAlterPartition(resetStatistics, tbl, newTPart, tableSnapshot);
       } catch (TException e) {
         LOG.error(StringUtils.stringifyException(e));
         throw new HiveException(e);
@@ -2207,7 +2209,7 @@ public class Hive {
   }
 
   private void addPartitionsToMetastore(List<Partition> partitions,
-                                        boolean hasFollowingStatsTask, Table tbl,
+                                        boolean resetStatistics, Table tbl,
                                         List<AcidUtils.TableSnapshot> tableSnapshots)
                                         throws HiveException {
     try {
@@ -2236,7 +2238,7 @@ public class Hive {
       LOG.debug("Caught AlreadyExistsException, trying to add partitions one by one.");
       assert partitions.size() == tableSnapshots.size();
       for (int i = 0; i < partitions.size(); i++) {
-        addPartitionToMetastore(partitions.get(i), hasFollowingStatsTask, tbl,
+        addPartitionToMetastore(partitions.get(i), resetStatistics, tbl,
                 tableSnapshots.get(i));
       }
     } catch (Exception e) {
@@ -2339,10 +2341,10 @@ public class Hive {
     return;
   }
 
-  private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl,
-      Partition newTPart, TableSnapshot tableSnapshot) throws TException {
+  private void setStatsPropAndAlterPartition(boolean resetStatistics, Table tbl,
+                                             Partition newTPart, TableSnapshot tableSnapshot) throws TException {
     EnvironmentContext ec = new EnvironmentContext();
-    if (hasFollowingStatsTask) {
+    if (!resetStatistics) {
       ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }
     LOG.debug("Altering existing partition " + newTPart.getSpec());
@@ -2351,14 +2353,14 @@ public class Hive {
         tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
   }
 
-  private void setStatsPropAndAlterPartitions(boolean hasFollowingStatsTask, Table tbl,
+  private void setStatsPropAndAlterPartitions(boolean resetStatistics, Table tbl,
                                              List<Partition> partitions,
                                              long writeId) throws TException {
     if (partitions.isEmpty()) {
       return;
     }
     EnvironmentContext ec = new EnvironmentContext();
-    if (hasFollowingStatsTask) {
+    if (!resetStatistics) {
       ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }
     if (LOG.isDebugEnabled()) {
@@ -2548,13 +2550,14 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param numDP number of dynamic partitions
    * @param isAcid true if this is an ACID operation
    * @param writeId writeId, can be 0 unless isAcid == true
+   * @param resetStatistics if true, reset the statistics for the partitions being loaded.
    * @return partition map details (PartitionSpec and Partition)
    * @throws HiveException
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath,
       final String tableName, final Map<String, String> partSpec, final LoadFileType loadFileType,
       final int numDP, final int numLB, final boolean isAcid, final long writeId, final int stmtId,
-      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation,
+      final boolean resetStatistics, final AcidUtils.Operation operation,
       boolean isInsertOverwrite) throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
@@ -2630,7 +2633,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           // load the partition
           Partition partition = loadPartitionInternal(entry.getKey(), tbl,
                   fullPartSpec, oldPartition, loadFileType, true, false, numLB > 0, false, isAcid,
-                  hasFollowingStatsTask, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles);
+                  resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles);
           // if the partition already existed before the loading, no need to add it again to the
           // metastore
 
@@ -2663,7 +2666,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
                   + " loadFileType=" + loadFileType.toString() + ", "
                   + " listBucketingLevel=" + numLB + ", "
                   + " isAcid=" + isAcid + ", "
-                  + " hasFollowingStatsTask=" + hasFollowingStatsTask, e);
+                  + " resetStatistics=" + resetStatistics, e);
           throw e;
         }
       });
@@ -2690,7 +2693,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
                       .filter(entry -> !entry.getValue().hasOldPartition)
                       .map(entry -> entry.getValue().partition)
                       .collect(Collectors.toList()),
-              hasFollowingStatsTask,
+              resetStatistics,
               tbl,
               partitionDetailsMap.entrySet()
                       .stream()
@@ -2707,7 +2710,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         }
       }
 
-      setStatsPropAndAlterPartitions(hasFollowingStatsTask, tbl,
+      setStatsPropAndAlterPartitions(resetStatistics, tbl,
               partitionDetailsMap.entrySet().stream()
                       .filter(entry -> entry.getValue().hasOldPartition)
                       .map(entry -> entry.getValue().partition)
@@ -2731,7 +2734,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
               append("loadFileType=" + loadFileType.toString() + ", ").
               append("listBucketingLevel=" + numLB + ", ").
               append("isAcid=" + isAcid + ", ").
-              append("hasFollowingStatsTask=" + hasFollowingStatsTask);
+              append("resetStatistics=" + resetStatistics);
 
       LOG.error(logMsg.toString(), e);
       throw e;
@@ -2981,8 +2984,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
             : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) {
           out.add(new Partition(tbl, outPart));
         }
+        EnvironmentContext ec = new EnvironmentContext();
+        // In case of replication, statistics are obtained from the source, so do not update them
+        // on the replica. Since we are not replicating statistics for transactional tables yet,
+        // this does not apply to partitions of a transactional table right now.
+        if (!AcidUtils.isTransactionalTable(tbl)) {
+          ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+        }
         getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),
-            partsToAlter, new EnvironmentContext(), validWriteIdList, writeId);
+            partsToAlter, ec, validWriteIdList, writeId);
 
         for ( org.apache.hadoop.hive.metastore.api.Partition outPart :
         getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){
@@ -3033,6 +3043,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
     if (addSpec.getSortCols() != null) {
       part.getSd().setSortCols(addSpec.getSortCols());
     }
+    if (addSpec.getColStats() != null) {
+      part.setColStats(addSpec.getColStats());
+    }
     return part;
   }
 
@@ -3629,6 +3642,23 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public List<Partition> getPartitionsByNames(Table tbl, List<String> partNames)
       throws HiveException {
+    return getPartitionsByNames(tbl, partNames, false);
+  }
+
+  /**
+   * Get all partitions of the table that matches the list of given partition names.
+   *
+   * @param tbl
+   *          object for which partition is needed. Must be partitioned.
+   * @param partNames
+   *          list of partition names
+   * @param getColStats
+   *          if true, Partition object includes column statistics for that partition.
+   * @return list of partition objects
+   * @throws HiveException
+   */
+  public List<Partition> getPartitionsByNames(Table tbl, List<String> partNames, boolean getColStats)
+      throws HiveException {
 
     if (!tbl.isPartitioned()) {
       throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
@@ -3644,7 +3674,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       for (int i = 0; i < nBatches; ++i) {
         List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
           getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
-          partNames.subList(i*batchSize, (i+1)*batchSize));
+            partNames.subList(i*batchSize, (i+1)*batchSize), getColStats);
         if (tParts != null) {
           for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) {
             partitions.add(new Partition(tbl, tpart));
@@ -3655,7 +3685,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       if (nParts > nBatches * batchSize) {
         List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
           getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
-          partNames.subList(nBatches*batchSize, nParts));
+            partNames.subList(nBatches*batchSize, nParts), getColStats);
         if (tParts != null) {
           for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) {
             partitions.add(new Partition(tbl, tpart));
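
For illustration, a minimal sketch of calling the new overload (assuming db is a Hive handle, tbl a
partitioned Table, and partNames a list of escaped partition names such as "country=india"):

  // Fetch the named partitions together with their column statistics; the trailing boolean is
  // the getColStats flag added by this change.
  List<Partition> partsWithStats = db.getPartitionsByNames(tbl, partNames, true);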

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
index 22ba14c..6418bd5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
@@ -24,7 +24,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-
 /**
  * PartitionIterable - effectively a lazy Iterable<Partition>
  *
@@ -100,7 +99,7 @@ public class PartitionIterable implements Iterable<Partition> {
           batch_counter++;
         }
         try {
-          batchIter = db.getPartitionsByNames(table,nameBatch).iterator();
+          batchIter = db.getPartitionsByNames(table, nameBatch, getColStats).iterator();
         } catch (HiveException e) {
           throw new RuntimeException(e);
         }
@@ -130,6 +129,7 @@ public class PartitionIterable implements Iterable<Partition> {
   private Map<String, String> partialPartitionSpec = null;
   private List<String> partitionNames = null;
   private int batch_size;
+  private boolean getColStats = false;
 
   /**
    * Dummy constructor, which simply acts as an iterator on an already-present
@@ -146,12 +146,22 @@ public class PartitionIterable implements Iterable<Partition> {
    * a Hive object and a table object, and a partial partition spec.
    */
   public PartitionIterable(Hive db, Table table, Map<String, String> partialPartitionSpec,
-      int batch_size) throws HiveException {
+                           int batch_size) throws HiveException {
+    this(db, table, partialPartitionSpec, batch_size, false);
+  }
+
+  /**
+   * Primary constructor that fetches all partitions in a given table, given
+   * a Hive object and a table object, and a partial partition spec.
+   */
+  public PartitionIterable(Hive db, Table table, Map<String, String> partialPartitionSpec,
+                           int batch_size, boolean getColStats) throws HiveException {
     this.currType = Type.LAZY_FETCH_PARTITIONS;
     this.db = db;
     this.table = table;
     this.partialPartitionSpec = partialPartitionSpec;
     this.batch_size = batch_size;
+    this.getColStats = getColStats;
 
     if (this.partialPartitionSpec == null){
       partitionNames = db.getPartitionNames(

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 83cb3ea..410868c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -1102,16 +1102,27 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     }
     return matchedParts;
   }
+
   /**
    * partNames are like "p=1/q=2" type strings.  The values (RHS of =) are escaped.
    */
   @Override
   public List<Partition> getPartitionsByNames(String db_name, String tblName,
-      List<String> partNames) throws TException {
+                                              List<String> partNames) throws TException {
+    return getPartitionsByNames(db_name, tblName, partNames, false);
+  }
+
+  /**
+   * partNames are like "p=1/q=2" type strings.  The values (RHS of =) are escaped.
+   */
+  @Override
+  public List<Partition> getPartitionsByNames(String db_name, String tblName,
+                                              List<String> partNames, boolean getColStats)
+          throws TException {
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(db_name, tblName);
     if (table == null) {
       //(assume) not a temp table - Try underlying client
-      return super.getPartitionsByNames(db_name, tblName, partNames);
+      return super.getPartitionsByNames(db_name, tblName, partNames, getColStats);
     }
     TempTable tt = getTempTable(table);
     if(tt == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 6102339..8242b47 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -341,7 +341,11 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition,
               replicationSpec, x.getConf());
       if (inReplicationScope){
-        StatsSetupConst.setBasicStatsState(partsDesc.getPartition(0).getPartParams(), StatsSetupConst.FALSE);
+        // Statistics for a non-transactional table will be replicated separately, so don't touch
+        // them here. Transactional table stats are not replicated yet, so mark them inaccurate.
+        if (TxnUtils.isTransactionalTable(tblDesc.getTblProps())) {
+          StatsSetupConst.setBasicStatsState(partsDesc.getPartition(0).getPartParams(), StatsSetupConst.FALSE);
+        }
       }
       partitionDescs.add(partsDesc);
     }
@@ -1190,22 +1194,26 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       Task t = createTableTask(tblDesc, x);
       table = createNewTableMetadataObject(tblDesc, true);
 
-      if (!replicationSpec.isMetadataOnly()) {
-        if (isPartitioned(tblDesc)) {
-          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
-            addPartitionDesc.setReplicationSpec(replicationSpec);
+      if (isPartitioned(tblDesc)) {
+        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
+          addPartitionDesc.setReplicationSpec(replicationSpec);
+          if (!replicationSpec.isMetadataOnly()) {
             t.addDependentTask(
-                addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec, x,
-                    writeId, stmtId));
-            if (updatedMetadata != null) {
-              updatedMetadata.addPartition(table.getDbName(), table.getTableName(),
-                      addPartitionDesc.getPartition(0).getPartSpec());
-            }
+                    addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec, x,
+                            writeId, stmtId));
+          } else {
+            t.addDependentTask(alterSinglePartition(tblDesc, table, wh, addPartitionDesc,
+                    replicationSpec, null, x));
+          }
+          if (updatedMetadata != null) {
+            updatedMetadata.addPartition(table.getDbName(), table.getTableName(),
+                    addPartitionDesc.getPartition(0).getPartSpec());
           }
-        } else {
-          x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
-          t.addDependentTask(loadTable(fromURI, table, replicationSpec.isReplace(), new Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId));
         }
+      } else if (!replicationSpec.isMetadataOnly()) {
+        x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
+        t.addDependentTask(loadTable(fromURI, table, replicationSpec.isReplace(),
+                new Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId));
       }
 
       if (dropTblTask != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index adc9446..1eee3fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -108,8 +109,16 @@ public class TableExport {
           if (replicationSpec.isMetadataOnly()) {
             return null;
           } else {
+            // For transactional tables, we do not replicate statistics right now, so don't
+            // include statistics in the Partition object either.
+            boolean getColStats;
+            if (AcidUtils.isTransactionalTable(tableSpec.tableHandle)) {
+              getColStats = false;
+            } else {
+              getColStats = true;
+            }
             return new PartitionIterable(db, tableSpec.tableHandle, null, conf.getIntVar(
-                HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
+                HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX), getColStats);
           }
         } else {
           // PARTITIONS specified - partitions inside tableSpec

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
index 0b7f910..415e954 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage;
@@ -51,6 +52,13 @@ class AddPartitionHandler extends AbstractEventHandler {
   public void handle(Context withinContext) throws Exception {
     LOG.info("Processing#{} ADD_PARTITION message : {}", fromEventId(), eventMessageAsJSON);
 
+    // We do not dump partitions during a metadata-only bootstrap dump (see TableExport
+    // .getPartitions(); for a bootstrap dump we pass a tableSpec with TABLE_ONLY set), so
+    // don't dump partition-related events for a metadata-only dump either.
+    if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+      return;
+    }
+
     AddPartitionMessage apm = (AddPartitionMessage) eventMessage;
     org.apache.hadoop.hive.metastore.api.Table tobj = apm.getTableObj();
     if (tobj == null) {

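The same early-return guard is added to AlterPartitionHandler and DropPartitionHandler below; in all three handlers it is keyed off REPL_DUMP_METADATA_ONLY, because partitions are never bootstrapped in a metadata-only dump and partition-level events would serve no purpose on the replica. A self-contained sketch of the pattern, with the HiveConf lookup replaced by a plain boolean and a hypothetical class name:

    final class PartitionEventGuardSketch {
      // Partitions are not part of a metadata-only bootstrap, so partition-level
      // events are skipped as well.
      static boolean skipPartitionEvent(boolean replDumpMetadataOnly) {
        return replDumpMetadataOnly;
      }

      static void handle(boolean replDumpMetadataOnly, Runnable dumpEvent) {
        if (skipPartitionEvent(replDumpMetadataOnly)) {
          return;            // early return: nothing is written to the dump
        }
        dumpEvent.run();     // otherwise the handler proceeds as before
      }

      public static void main(String[] args) {
        handle(true,  () -> System.out.println("dumped"));  // prints nothing
        handle(false, () -> System.out.println("dumped"));  // prints "dumped"
      }
    }
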
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
index d81408e..1b91e3e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -91,6 +92,13 @@ class AlterPartitionHandler extends AbstractEventHandler<AlterPartitionMessage>
   public void handle(Context withinContext) throws Exception {
     LOG.info("Processing#{} ALTER_PARTITION message : {}", fromEventId(), eventMessageAsJSON);
 
+    // We do not dump partitions during a metadata-only bootstrap dump (see TableExport
+    // .getPartitions(); for a bootstrap dump we pass a tableSpec with TABLE_ONLY set), so
+    // don't dump partition-related events for a metadata-only dump either.
+    if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+      return;
+    }
+
     Table qlMdTable = new Table(tableObject);
     if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, withinContext.hiveConf)) {
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
index e2a40d2..272f5ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 
 import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
@@ -38,6 +39,14 @@ class DropPartitionHandler extends AbstractEventHandler<DropPartitionMessage> {
   @Override
   public void handle(Context withinContext) throws Exception {
     LOG.info("Processing#{} DROP_PARTITION message : {}", fromEventId(), eventMessageAsJSON);
+
+    // We do not dump partitions during a metadata-only bootstrap dump (see TableExport
+    // .getPartitions(); for a bootstrap dump we pass a tableSpec with TABLE_ONLY set), so
+    // don't dump partition-related events for a metadata-only dump either.
+    if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
+      return;
+    }
+
     DumpMetaData dmd = withinContext.createDmd(this);
     dmd.setPayload(eventMessageAsJSON);
     dmd.write();

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdatePartColStatHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdatePartColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdatePartColStatHandler.java
index 332005b..f3f00c5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdatePartColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdatePartColStatHandler.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events;
 
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.UpdatePartitionColumnStatMessage;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 class UpdatePartColStatHandler extends AbstractEventHandler<UpdatePartitionColumnStatMessage> {
@@ -35,7 +38,32 @@ class UpdatePartColStatHandler extends AbstractEventHandler<UpdatePartitionColum
 
   @Override
   public void handle(Context withinContext) throws Exception {
-    LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON);
+    LOG.info("Processing#{} UpdatePartitionTableColumnStat message : {}", fromEventId(),
+            eventMessageAsJSON);
+
+    org.apache.hadoop.hive.metastore.api.Table tableObj = eventMessage.getTableObject();
+    if (tableObj == null) {
+      LOG.debug("Event#{} was an event of type {} with no table listed", fromEventId(),
+              event.getEventType());
+      return;
+    }
+
+    // Statistics without the underlying data do not make sense.
+    if (withinContext.replicationSpec.isMetadataOnly()) {
+      return;
+    }
+
+    // For now we do not dump statistics for a transactional table since replicating them is
+    // not yet supported.
+    if (AcidUtils.isTransactionalTable(tableObj)) {
+      return;
+    }
+
+    if (!Utils.shouldReplicate(withinContext.replicationSpec, new Table(tableObj),
+                              withinContext.hiveConf)) {
+      return;
+    }
+
     DumpMetaData dmd = withinContext.createDmd(this);
     dmd.setPayload(eventMessageAsJSON);
     dmd.write();

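Taken together, the early returns added to the dump-side handler form a small filter: a missing table object, a metadata-only dump, a transactional table, or a table excluded by the replication policy each prevent the statistics event from being dumped. A simplified, self-contained sketch of that ordering; the EventView class and its fields are stand-ins for what the handler reads from the event message and dump context:

    import java.util.Optional;

    final class PartColStatDumpFilterSketch {

      // Stand-ins for what the handler reads from the event message and dump context.
      static final class EventView {
        final boolean hasTableObject;
        final boolean metadataOnlyDump;
        final boolean transactionalTable;
        final boolean tableShouldBeReplicated;

        EventView(boolean hasTableObject, boolean metadataOnlyDump,
                  boolean transactionalTable, boolean tableShouldBeReplicated) {
          this.hasTableObject = hasTableObject;
          this.metadataOnlyDump = metadataOnlyDump;
          this.transactionalTable = transactionalTable;
          this.tableShouldBeReplicated = tableShouldBeReplicated;
        }
      }

      // Returns the reason the event is skipped, or empty when it should be dumped.
      static Optional<String> skipReason(EventView e) {
        if (!e.hasTableObject) {
          return Optional.of("no table listed in the event");
        }
        if (e.metadataOnlyDump) {
          return Optional.of("statistics without the underlying data are not useful");
        }
        if (e.transactionalTable) {
          return Optional.of("stats replication for transactional tables is not supported yet");
        }
        if (!e.tableShouldBeReplicated) {
          return Optional.of("table excluded by the replication policy");
        }
        return Optional.empty();
      }

      public static void main(String[] args) {
        EventView ev = new EventView(true, false, true, true);
        System.out.println(skipReason(ev).orElse("dump the event"));
      }
    }
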
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
index e50a2bc..bd8182d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/UpdateTableColStatHandler.java
@@ -36,6 +36,7 @@ class UpdateTableColStatHandler extends AbstractEventHandler<UpdateTableColumnSt
 
   @Override
   public void handle(Context withinContext) throws Exception {
+    LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON);
     Table qlMdTable = new Table(eventMessage.getTableObject());
     if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, withinContext.hiveConf)) {
       return;
@@ -50,7 +51,6 @@ class UpdateTableColStatHandler extends AbstractEventHandler<UpdateTableColumnSt
       return;
     }
 
-    LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON);
     DumpMetaData dmd = withinContext.createDmd(this);
     dmd.setPayload(eventMessageAsJSON);
     dmd.write();

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
index 4ba2ac4..02e938e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.messaging.UpdatePartitionColumnStatMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
+import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
 
 import java.io.Serializable;
 import java.util.Collections;
@@ -34,10 +37,21 @@ public class UpdatePartColStatHandler extends AbstractMessageHandler {
   @Override
   public List<Task<? extends Serializable>> handle(Context context)
       throws SemanticException {
-    context.log.info("Replication of partition stat update event is not supported yet");
+    UpdatePartitionColumnStatMessage upcsm =
+            deserializer.getUpdatePartitionColumnStatMessage(context.dmd.getPayload());
+
+    // Update the table name and database name in the statistics object.
+    ColumnStatistics colStats = upcsm.getColumnStatistics();
+    ColumnStatisticsDesc colStatsDesc = colStats.getStatsDesc();
+    if (!context.isTableNameEmpty()) {
+      colStatsDesc.setTableName(context.tableName);
+    }
     if (!context.isDbNameEmpty()) {
-      updatedMetadata.set(context.dmd.getEventTo().toString(), context.dbName, context.tableName, null);
+      colStatsDesc.setDbName(context.dbName);
+      updatedMetadata.set(context.dmd.getEventTo().toString(), context.dbName, context.tableName,
+                  null);
     }
-    return Collections.singletonList(TaskFactory.get(new DependencyCollectionWork(), context.hiveConf));
+    return Collections.singletonList(TaskFactory.get(new ColumnStatsUpdateWork(colStats),
+            context.hiveConf));
   }
 }

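On the load side, the handler now deserializes the ColumnStatistics carried in the event payload, retargets its database and table names to those supplied by the load context, and returns a ColumnStatsUpdateWork task instead of a no-op DependencyCollectionWork. A rough sketch of the retargeting step with simplified stand-in types (not the real metastore ColumnStatistics/ColumnStatisticsDesc classes):

    final class PartColStatRetargetSketch {

      static final class StatsDesc {          // stand-in for ColumnStatisticsDesc
        String dbName;
        String tableName;
      }

      static final class ColumnStats {        // stand-in for ColumnStatistics
        final StatsDesc statsDesc = new StatsDesc();
      }

      // Point the statistics at the database/table named in the load context, when given.
      static ColumnStats retarget(ColumnStats stats, String targetDb, String targetTable) {
        if (targetTable != null && !targetTable.isEmpty()) {
          stats.statsDesc.tableName = targetTable;
        }
        if (targetDb != null && !targetDb.isEmpty()) {
          stats.statsDesc.dbName = targetDb;
        }
        return stats;   // the real handler wraps this in a ColumnStatsUpdateWork task
      }

      public static void main(String[] args) {
        ColumnStats stats = retarget(new ColumnStats(), "replica_db", "part_tbl");
        System.out.println(stats.statsDesc.dbName + "." + stats.statsDesc.tableName);
      }
    }
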
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java
index d3a87f9..26cb217 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 
 /**
@@ -58,6 +59,7 @@ public class AddPartitionDesc extends DDLDesc implements Serializable {
     Map<String, String> serdeParams = null;
     List<String> bucketCols = null;
     List<Order> sortCols = null;
+    ColumnStatistics colStats = null;
 
     public Map<String, String> getPartSpec() {
       return partSpec;
@@ -145,6 +147,10 @@ public class AddPartitionDesc extends DDLDesc implements Serializable {
     public void setOutputFormat(String outputFormat) {
       this.outputFormat = outputFormat;
     }
+
+    public ColumnStatistics getColStats() { return colStats; }
+
+    public void setColStats(ColumnStatistics colStats) { this.colStats = colStats; }
   }
 
   private static final long serialVersionUID = 1L;

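The new colStats field in AddPartitionDesc lets each partition description carry the column statistics captured at dump time, so the import path can restore them when the partition is created. An illustrative stand-in for that shape; the map-based statistics type here is hypothetical, while the real field holds a metastore ColumnStatistics object:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class OnePartitionDescSketch {
      final Map<String, String> partSpec = new LinkedHashMap<>();  // e.g. {ds=2019-01-29}
      Map<String, Long> colStats;                                  // e.g. column -> NDV

      void setColStats(Map<String, Long> stats) { this.colStats = stats; }
      Map<String, Long> getColStats()           { return colStats; }

      public static void main(String[] args) {
        OnePartitionDescSketch desc = new OnePartitionDescSketch();
        desc.partSpec.put("ds", "2019-01-29");
        desc.setColStats(Map.of("id", 42L));
        System.out.println(desc.partSpec + " stats=" + desc.getColStats());
      }
    }
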
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
index 7dcfc17..9661778 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
@@ -351,13 +351,13 @@ import org.slf4j.LoggerFactory;
           case 1: // TXN_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list594 = iprot.readListBegin();
-                struct.txn_ids = new ArrayList<Long>(_list594.size);
-                long _elem595;
-                for (int _i596 = 0; _i596 < _list594.size; ++_i596)
+                org.apache.thrift.protocol.TList _list610 = iprot.readListBegin();
+                struct.txn_ids = new ArrayList<Long>(_list610.size);
+                long _elem611;
+                for (int _i612 = 0; _i612 < _list610.size; ++_i612)
                 {
-                  _elem595 = iprot.readI64();
-                  struct.txn_ids.add(_elem595);
+                  _elem611 = iprot.readI64();
+                  struct.txn_ids.add(_elem611);
                 }
                 iprot.readListEnd();
               }
@@ -383,9 +383,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size()));
-          for (long _iter597 : struct.txn_ids)
+          for (long _iter613 : struct.txn_ids)
           {
-            oprot.writeI64(_iter597);
+            oprot.writeI64(_iter613);
           }
           oprot.writeListEnd();
         }
@@ -410,9 +410,9 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.txn_ids.size());
-        for (long _iter598 : struct.txn_ids)
+        for (long _iter614 : struct.txn_ids)
         {
-          oprot.writeI64(_iter598);
+          oprot.writeI64(_iter614);
         }
       }
     }
@@ -421,13 +421,13 @@ import org.slf4j.LoggerFactory;
     public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.txn_ids = new ArrayList<Long>(_list599.size);
-        long _elem600;
-        for (int _i601 = 0; _i601 < _list599.size; ++_i601)
+        org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.txn_ids = new ArrayList<Long>(_list615.size);
+        long _elem616;
+        for (int _i617 = 0; _i617 < _list615.size; ++_i617)
         {
-          _elem600 = iprot.readI64();
-          struct.txn_ids.add(_elem600);
+          _elem616 = iprot.readI64();
+          struct.txn_ids.add(_elem616);
         }
       }
       struct.setTxn_idsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
index 9c33229..d678c02 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@ -816,13 +816,13 @@ import org.slf4j.LoggerFactory;
           case 5: // PARTITIONNAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list724 = iprot.readListBegin();
-                struct.partitionnames = new ArrayList<String>(_list724.size);
-                String _elem725;
-                for (int _i726 = 0; _i726 < _list724.size; ++_i726)
+                org.apache.thrift.protocol.TList _list740 = iprot.readListBegin();
+                struct.partitionnames = new ArrayList<String>(_list740.size);
+                String _elem741;
+                for (int _i742 = 0; _i742 < _list740.size; ++_i742)
                 {
-                  _elem725 = iprot.readString();
-                  struct.partitionnames.add(_elem725);
+                  _elem741 = iprot.readString();
+                  struct.partitionnames.add(_elem741);
                 }
                 iprot.readListEnd();
               }
@@ -872,9 +872,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size()));
-          for (String _iter727 : struct.partitionnames)
+          for (String _iter743 : struct.partitionnames)
           {
-            oprot.writeString(_iter727);
+            oprot.writeString(_iter743);
           }
           oprot.writeListEnd();
         }
@@ -910,9 +910,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeString(struct.tablename);
       {
         oprot.writeI32(struct.partitionnames.size());
-        for (String _iter728 : struct.partitionnames)
+        for (String _iter744 : struct.partitionnames)
         {
-          oprot.writeString(_iter728);
+          oprot.writeString(_iter744);
         }
       }
       BitSet optionals = new BitSet();
@@ -937,13 +937,13 @@ import org.slf4j.LoggerFactory;
       struct.tablename = iprot.readString();
       struct.setTablenameIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.partitionnames = new ArrayList<String>(_list729.size);
-        String _elem730;
-        for (int _i731 = 0; _i731 < _list729.size; ++_i731)
+        org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.partitionnames = new ArrayList<String>(_list745.size);
+        String _elem746;
+        for (int _i747 = 0; _i747 < _list745.size; ++_i747)
         {
-          _elem730 = iprot.readString();
-          struct.partitionnames.add(_elem730);
+          _elem746 = iprot.readString();
+          struct.partitionnames.add(_elem746);
         }
       }
       struct.setPartitionnamesIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
index d05e7ba..3dd36da 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
@@ -716,13 +716,13 @@ import org.slf4j.LoggerFactory;
           case 3: // TXN_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
-                struct.txnIds = new ArrayList<Long>(_list650.size);
-                long _elem651;
-                for (int _i652 = 0; _i652 < _list650.size; ++_i652)
+                org.apache.thrift.protocol.TList _list666 = iprot.readListBegin();
+                struct.txnIds = new ArrayList<Long>(_list666.size);
+                long _elem667;
+                for (int _i668 = 0; _i668 < _list666.size; ++_i668)
                 {
-                  _elem651 = iprot.readI64();
-                  struct.txnIds.add(_elem651);
+                  _elem667 = iprot.readI64();
+                  struct.txnIds.add(_elem667);
                 }
                 iprot.readListEnd();
               }
@@ -742,14 +742,14 @@ import org.slf4j.LoggerFactory;
           case 5: // SRC_TXN_TO_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list653 = iprot.readListBegin();
-                struct.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(_list653.size);
-                TxnToWriteId _elem654;
-                for (int _i655 = 0; _i655 < _list653.size; ++_i655)
+                org.apache.thrift.protocol.TList _list669 = iprot.readListBegin();
+                struct.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(_list669.size);
+                TxnToWriteId _elem670;
+                for (int _i671 = 0; _i671 < _list669.size; ++_i671)
                 {
-                  _elem654 = new TxnToWriteId();
-                  _elem654.read(iprot);
-                  struct.srcTxnToWriteIdList.add(_elem654);
+                  _elem670 = new TxnToWriteId();
+                  _elem670.read(iprot);
+                  struct.srcTxnToWriteIdList.add(_elem670);
                 }
                 iprot.readListEnd();
               }
@@ -786,9 +786,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size()));
-            for (long _iter656 : struct.txnIds)
+            for (long _iter672 : struct.txnIds)
             {
-              oprot.writeI64(_iter656);
+              oprot.writeI64(_iter672);
             }
             oprot.writeListEnd();
           }
@@ -807,9 +807,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size()));
-            for (TxnToWriteId _iter657 : struct.srcTxnToWriteIdList)
+            for (TxnToWriteId _iter673 : struct.srcTxnToWriteIdList)
             {
-              _iter657.write(oprot);
+              _iter673.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -849,9 +849,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetTxnIds()) {
         {
           oprot.writeI32(struct.txnIds.size());
-          for (long _iter658 : struct.txnIds)
+          for (long _iter674 : struct.txnIds)
           {
-            oprot.writeI64(_iter658);
+            oprot.writeI64(_iter674);
           }
         }
       }
@@ -861,9 +861,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSrcTxnToWriteIdList()) {
         {
           oprot.writeI32(struct.srcTxnToWriteIdList.size());
-          for (TxnToWriteId _iter659 : struct.srcTxnToWriteIdList)
+          for (TxnToWriteId _iter675 : struct.srcTxnToWriteIdList)
           {
-            _iter659.write(oprot);
+            _iter675.write(oprot);
           }
         }
       }
@@ -879,13 +879,13 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list660 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-          struct.txnIds = new ArrayList<Long>(_list660.size);
-          long _elem661;
-          for (int _i662 = 0; _i662 < _list660.size; ++_i662)
+          org.apache.thrift.protocol.TList _list676 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.txnIds = new ArrayList<Long>(_list676.size);
+          long _elem677;
+          for (int _i678 = 0; _i678 < _list676.size; ++_i678)
           {
-            _elem661 = iprot.readI64();
-            struct.txnIds.add(_elem661);
+            _elem677 = iprot.readI64();
+            struct.txnIds.add(_elem677);
           }
         }
         struct.setTxnIdsIsSet(true);
@@ -896,14 +896,14 @@ import org.slf4j.LoggerFactory;
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(_list663.size);
-          TxnToWriteId _elem664;
-          for (int _i665 = 0; _i665 < _list663.size; ++_i665)
+          org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(_list679.size);
+          TxnToWriteId _elem680;
+          for (int _i681 = 0; _i681 < _list679.size; ++_i681)
           {
-            _elem664 = new TxnToWriteId();
-            _elem664.read(iprot);
-            struct.srcTxnToWriteIdList.add(_elem664);
+            _elem680 = new TxnToWriteId();
+            _elem680.read(iprot);
+            struct.srcTxnToWriteIdList.add(_elem680);
           }
         }
         struct.setSrcTxnToWriteIdListIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
index 193179b..dd73a69 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
@@ -354,14 +354,14 @@ import org.slf4j.LoggerFactory;
           case 1: // TXN_TO_WRITE_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list666 = iprot.readListBegin();
-                struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list666.size);
-                TxnToWriteId _elem667;
-                for (int _i668 = 0; _i668 < _list666.size; ++_i668)
+                org.apache.thrift.protocol.TList _list682 = iprot.readListBegin();
+                struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list682.size);
+                TxnToWriteId _elem683;
+                for (int _i684 = 0; _i684 < _list682.size; ++_i684)
                 {
-                  _elem667 = new TxnToWriteId();
-                  _elem667.read(iprot);
-                  struct.txnToWriteIds.add(_elem667);
+                  _elem683 = new TxnToWriteId();
+                  _elem683.read(iprot);
+                  struct.txnToWriteIds.add(_elem683);
                 }
                 iprot.readListEnd();
               }
@@ -387,9 +387,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size()));
-          for (TxnToWriteId _iter669 : struct.txnToWriteIds)
+          for (TxnToWriteId _iter685 : struct.txnToWriteIds)
           {
-            _iter669.write(oprot);
+            _iter685.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -414,9 +414,9 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.txnToWriteIds.size());
-        for (TxnToWriteId _iter670 : struct.txnToWriteIds)
+        for (TxnToWriteId _iter686 : struct.txnToWriteIds)
         {
-          _iter670.write(oprot);
+          _iter686.write(oprot);
         }
       }
     }
@@ -425,14 +425,14 @@ import org.slf4j.LoggerFactory;
     public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list671.size);
-        TxnToWriteId _elem672;
-        for (int _i673 = 0; _i673 < _list671.size; ++_i673)
+        org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list687.size);
+        TxnToWriteId _elem688;
+        for (int _i689 = 0; _i689 < _list687.size; ++_i689)
         {
-          _elem672 = new TxnToWriteId();
-          _elem672.read(iprot);
-          struct.txnToWriteIds.add(_elem672);
+          _elem688 = new TxnToWriteId();
+          _elem688.read(iprot);
+          struct.txnToWriteIds.add(_elem688);
         }
       }
       struct.setTxnToWriteIdsIsSet(true);