Posted to commits@hive.apache.org by se...@apache.org on 2018/03/08 21:00:35 UTC

[1/3] hive git commit: HIVE-18571 : stats issues for MM tables; ACID doesn't check state for CTAS (Sergey Shelukhin, reviewed by Eugene Koifman)

Repository: hive
Updated Branches:
  refs/heads/master 7edb1d648 -> 9b36ffa92


http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index b051961..d343911 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -606,20 +606,16 @@ public class MetaStoreUtils {
     return false;
   }
 
-  public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
-                                             boolean madeDir, EnvironmentContext environmentContext) throws MetaException {
-    return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext);
-  }
-
-  public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
-                                             boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
-    if (tbl.getPartitionKeysSize() == 0) {
-      // Update stats only when unpartitioned
-      FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
-      return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext);
-    } else {
-      return false;
-    }
+  public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh, boolean madeDir,
+      boolean forceRecompute, EnvironmentContext environmentContext, boolean isCreate) throws MetaException {
+    if (tbl.getPartitionKeysSize() != 0) return false;
+    // Update stats only when unpartitioned
+    // TODO: this is also invalid for ACID tables, except for the create case (by coincidence),
+    //       because the methods in metastore get all the files in the table directory without
+    //       regard for ACID state.
+    List<FileStatus> fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
+    return updateTableStatsFast(
+        tbl, fileStatuses, madeDir, forceRecompute, environmentContext, isCreate);
   }
 
   /**
@@ -632,8 +628,9 @@ public class MetaStoreUtils {
    * these parameters set
    * @return true if the stats were updated, false otherwise
    */
-  public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
-                                             boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
+  private static boolean updateTableStatsFast(Table tbl, List<FileStatus> fileStatus,
+      boolean newDir, boolean forceRecompute, EnvironmentContext environmentContext,
+      boolean isCreate) throws MetaException {
 
     Map<String,String> params = tbl.getParameters();
 
@@ -646,39 +643,43 @@ public class MetaStoreUtils {
       }
     }
 
-    boolean updated = false;
-    if (forceRecompute ||
-        params == null ||
-        !containsAllFastStats(params)) {
-      if (params == null) {
-        params = new HashMap<>();
-      }
-      if (!newDir) {
-        // The table location already exists and may contain data.
-        // Let's try to populate those stats that don't require full scan.
-        LOG.info("Updating table stats fast for " + tbl.getTableName());
-        populateQuickStats(fileStatus, params);
-        LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
-        if (environmentContext != null
-            && environmentContext.isSetProperties()
-            && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
-            StatsSetupConst.STATS_GENERATED))) {
-          StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
-        } else {
-          StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
-        }
-      }
+    if (!forceRecompute && params != null && containsAllFastStats(params)) return false;
+    if (params == null) {
+      params = new HashMap<>();
+    }
+    if (!isCreate && MetaStoreUtils.isTransactionalTable(tbl.getParameters())) {
+      // TODO: we should use AcidUtils.getAcidFilesForStats, but cannot access it from metastore.
+      LOG.warn("Not updating fast stats for a transactional table " + tbl.getTableName());
       tbl.setParameters(params);
-      updated = true;
+      return true;
     }
-    return updated;
+    if (!newDir) {
+      // The table location already exists and may contain data.
+      // Let's try to populate those stats that don't require full scan.
+      LOG.info("Updating table stats fast for " + tbl.getTableName());
+      populateQuickStats(fileStatus, params);
+      LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
+      if (environmentContext != null
+          && environmentContext.isSetProperties()
+          && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
+          StatsSetupConst.STATS_GENERATED))) {
+        StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
+      } else {
+        StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
+      }
+    }
+    tbl.setParameters(params);
+    return true;
   }
 
-  public static void populateQuickStats(FileStatus[] fileStatus, Map<String, String> params) {
+  /** This method is invalid for MM and ACID tables unless fileStatus comes from AcidUtils. */
+  public static void populateQuickStats(List<FileStatus> fileStatus, Map<String, String> params) {
+    // Why is this even in metastore?
+    LOG.trace("Populating quick stats based on {} files", fileStatus.size());
     int numFiles = 0;
     long tableSize = 0L;
     for (FileStatus status : fileStatus) {
-      // don't take directories into account for quick stats
+      // don't take directories into account for quick stats TODO: wtf?
       if (!status.isDir()) {
         tableSize += status.getLen();
         numFiles += 1;
@@ -687,6 +688,12 @@ public class MetaStoreUtils {
     params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles));
     params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
   }
+ 
+  public static void clearQuickStats(Map<String, String> params) {
+    params.remove(StatsSetupConst.NUM_FILES);
+    params.remove(StatsSetupConst.TOTAL_SIZE);
+  }
+ 
 
   public static boolean areSameColumns(List<FieldSchema> oldCols, List<FieldSchema> newCols) {
     return ListUtils.isEqualList(oldCols, newCols);
@@ -707,16 +714,6 @@ public class MetaStoreUtils {
     }
   }
 
-  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
-      throws MetaException {
-    return updatePartitionStatsFast(part, wh, false, false, environmentContext);
-  }
-
-  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir, EnvironmentContext environmentContext)
-      throws MetaException {
-    return updatePartitionStatsFast(part, wh, madeDir, false, environmentContext);
-  }
-
   /**
    * Updates the numFiles and totalSize parameters for the passed Partition by querying
    *  the warehouse if the passed Partition does not already have values for these parameters.
@@ -727,10 +724,11 @@ public class MetaStoreUtils {
    * these parameters set
    * @return true if the stats were updated, false otherwise
    */
-  public static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
-                                                 boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
+  public static boolean updatePartitionStatsFast(Partition part, Table tbl, Warehouse wh,
+      boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext,
+      boolean isCreate) throws MetaException {
     return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
-        wh, madeDir, forceRecompute, environmentContext);
+        tbl, wh, madeDir, forceRecompute, environmentContext, isCreate);
   }
   /**
    * Updates the numFiles and totalSize parameters for the passed Partition by querying
@@ -742,29 +740,32 @@ public class MetaStoreUtils {
    * these parameters set
    * @return true if the stats were updated, false otherwise
    */
-  public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh,
-                                                 boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
+  public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part,
+      Table table, Warehouse wh, boolean madeDir, boolean forceRecompute,
+      EnvironmentContext environmentContext, boolean isCreate) throws MetaException {
     Map<String,String> params = part.getParameters();
-    boolean updated = false;
-    if (forceRecompute ||
-        params == null ||
-        !containsAllFastStats(params)) {
-      if (params == null) {
-        params = new HashMap<>();
-      }
-      if (!madeDir) {
-        // The partition location already existed and may contain data. Lets try to
-        // populate those statistics that don't require a full scan of the data.
-        LOG.warn("Updating partition stats fast for: " + part.getTableName());
-        FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
-        populateQuickStats(fileStatus, params);
-        LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
-        updateBasicState(environmentContext, params);
-      }
+    if (!forceRecompute && params != null && containsAllFastStats(params)) return false;
+    if (params == null) {
+      params = new HashMap<>();
+    }
+    if (!isCreate && MetaStoreUtils.isTransactionalTable(table.getParameters())) {
+      // TODO: implement?
+      LOG.warn("Not updating fast stats for a transactional table " + table.getTableName());
       part.setParameters(params);
-      updated = true;
+      return true;
     }
-    return updated;
+    if (!madeDir) {
+      // The partition location already existed and may contain data. Lets try to
+      // populate those statistics that don't require a full scan of the data.
+      LOG.warn("Updating partition stats fast for: " + part.getTableName());
+      List<FileStatus> fileStatus = wh.getFileStatusesForLocation(part.getLocation());
+      // TODO: this is invalid for ACID tables, and we cannot access AcidUtils here.
+      populateQuickStats(fileStatus, params);
+      LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
+      updateBasicState(environmentContext, params);
+    }
+    part.setParameters(params);
+    return true;
   }
 
   /*
@@ -792,6 +793,12 @@ public class MetaStoreUtils {
   }
 
   /** Duplicates AcidUtils; used in a couple places in metastore. */
+  public static boolean isTransactionalTable(Map<String, String> params) {
+    String transactionalProp = params.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+    return (transactionalProp != null && "true".equalsIgnoreCase(transactionalProp));
+  }
+
+  /** Duplicates AcidUtils; used in a couple places in metastore. */
   public static boolean isInsertOnlyTableParam(Map<String, String> params) {
     String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
     return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
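
For illustration only (not part of the patch): a minimal, self-contained sketch of what the two new helpers amount to, with the constants inlined as the literals behind hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, StatsSetupConst.NUM_FILES and StatsSetupConst.TOTAL_SIZE, and made-up values:

    import java.util.HashMap;
    import java.util.Map;

    public class QuickStatsSketch {
      public static void main(String[] args) {
        // Hypothetical parameter map for an insert-only (MM) table.
        Map<String, String> params = new HashMap<>();
        params.put("transactional", "true");
        params.put("transactional_properties", "insert_only");
        params.put("numFiles", "4");
        params.put("totalSize", "1024");

        // Mirrors isTransactionalTable: a case-insensitive "true" check.
        String txn = params.get("transactional");
        boolean isTransactional = txn != null && "true".equalsIgnoreCase(txn);

        // Mirrors clearQuickStats: drop the fast stats rather than serve
        // numbers computed from a listing that ignored ACID state.
        if (isTransactional) {
          params.remove("numFiles");
          params.remove("totalSize");
        }
        System.out.println(isTransactional + " -> " + params);
      }
    }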

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 2599ab1..7091c5b 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -489,7 +489,7 @@ public abstract class TestHiveMetaStore {
     part4.setSd(tbl.getSd().deepCopy());
     part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
     part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
-    MetaStoreUtils.updatePartitionStatsFast(part4, warehouse, null);
+    MetaStoreUtils.updatePartitionStatsFast(part4, tbl, warehouse, false, false, null, true);
     return part4;
   }
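
Call sites migrate from the removed convenience overloads to the single explicit signature, as the test change above shows. A hedged before/after sketch, assuming a Partition part, Table tbl, Warehouse warehouse and EnvironmentContext envCtx in scope, with MetaException handled by the caller:

    // Before (removed overload; madeDir/forceRecompute defaulted to false):
    //   MetaStoreUtils.updatePartitionStatsFast(part, warehouse, envCtx);
    // After: the Table is passed so the transactional check can consult its
    // parameters, and isCreate marks creation-time calls, where listing the
    // directory is still valid even for ACID tables.
    MetaStoreUtils.updatePartitionStatsFast(
        part, tbl, warehouse, /* madeDir */ false, /* forceRecompute */ false,
        envCtx, /* isCreate */ true);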
 


[3/3] hive git commit: HIVE-18571 : stats issues for MM tables; ACID doesn't check state for CTAS (Sergey Shelukhin, reviewed by Eugene Koifman)

Posted by se...@apache.org.
HIVE-18571 : stats issues for MM tables; ACID doesn't check state for CTAS (Sergey Shelukhin, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9b36ffa9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9b36ffa9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9b36ffa9

Branch: refs/heads/master
Commit: 9b36ffa92cc4e0f47ea03d8d167debe743342f5b
Parents: 7edb1d6
Author: sergey <se...@apache.org>
Authored: Thu Mar 8 12:53:20 2018 -0800
Committer: sergey <se...@apache.org>
Committed: Thu Mar 8 12:53:20 2018 -0800

----------------------------------------------------------------------
 .../hadoop/hive/common/HiveStatsUtils.java      |  14 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../hive/ql/parse/TestReplicationScenarios.java |  23 +--
 ...TestReplicationScenariosAcrossInstances.java |   4 +
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |   4 +
 .../apache/hadoop/hive/ql/exec/CopyTask.java    |   2 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   6 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  14 +-
 .../bootstrap/load/table/LoadPartitions.java    |   2 +
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  77 +++++++--
 .../hadoop/hive/ql/io/merge/MergeFileWork.java  |  11 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  43 ++++-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   2 +
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   5 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  34 ++--
 .../hadoop/hive/ql/plan/BasicStatsWork.java     |   4 +-
 .../ql/plan/ConditionalResolverMergeFiles.java  |   3 +-
 .../hive/ql/stats/BasicStatsNoJobTask.java      |  15 +-
 .../hadoop/hive/ql/stats/BasicStatsTask.java    |  62 +++++---
 .../hadoop/hive/ql/stats/ColStatsProcessor.java |   2 +-
 .../apache/hadoop/hive/ql/stats/Partish.java    |   4 +
 .../hive/ql/stats/fs/FSStatsAggregator.java     |  10 +-
 .../hive/ql/stats/fs/FSStatsPublisher.java      |   4 +-
 .../clientnegative/orc_change_fileformat_acid.q |   3 +
 .../clientnegative/orc_change_serde_acid.q      |   3 +
 .../clientnegative/orc_reorder_columns1_acid.q  |   3 +
 .../clientnegative/orc_reorder_columns2_acid.q  |   3 +
 .../clientnegative/orc_replace_columns1_acid.q  |   3 +
 .../clientnegative/orc_replace_columns2_acid.q  |   3 +
 .../clientnegative/orc_replace_columns3_acid.q  |   3 +
 .../clientnegative/orc_type_promotion1_acid.q   |   3 +
 .../clientnegative/orc_type_promotion2_acid.q   |   3 +
 .../clientnegative/orc_type_promotion3_acid.q   |   3 +
 .../test/queries/clientpositive/acid_nullscan.q |   1 +
 .../results/clientpositive/acid_nullscan.q.out  |   8 +-
 .../clientpositive/autoColumnStats_4.q.out      |   4 -
 .../clientpositive/druid/druidmini_mv.q.out     |  58 +++----
 .../llap/acid_bucket_pruning.q.out              |  12 +-
 .../llap/acid_vectorization_original.q.out      |  14 +-
 .../llap/default_constraint.q.out               |  21 +--
 .../llap/dynpart_sort_optimization_acid.q.out   |  94 +++++------
 .../llap/enforce_constraint_notnull.q.out       |  24 +--
 .../materialized_view_create_rewrite_3.q.out    |  20 +--
 ...ized_view_create_rewrite_rebuild_dummy.q.out |  20 +--
 .../results/clientpositive/llap/mm_all.q.out    |  12 +-
 .../materialized_view_create_rewrite_3.q.out    |  40 ++---
 ql/src/test/results/clientpositive/mm_all.q.out |   8 +-
 .../results/clientpositive/mm_default.q.out     |   2 +-
 .../tez/acid_vectorization_original_tez.q.out   |  14 +-
 .../clientpositive/tez/explainanalyze_5.q.out   |   8 +-
 .../hadoop/hive/common/StatsSetupConst.java     |   1 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |  38 +++--
 .../hadoop/hive/metastore/HiveMetaStore.java    |   6 +-
 .../apache/hadoop/hive/metastore/Warehouse.java |   6 +-
 .../hadoop/hive/metastore/utils/FileUtils.java  |  11 +-
 .../hive/metastore/utils/MetaStoreUtils.java    | 157 ++++++++++---------
 .../hive/metastore/TestHiveMetaStore.java       |   2 +-
 57 files changed, 562 insertions(+), 396 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
index df77a4a..09343e5 100644
--- a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
@@ -54,17 +54,17 @@ public class HiveStatsUtils {
    * @return array of FileStatus
    * @throws IOException
    */
-  public static FileStatus[] getFileStatusRecurse(Path path, int level,  FileSystem fs)
+  public static List<FileStatus> getFileStatusRecurse(Path path, int level,  FileSystem fs)
       throws IOException {
     return getFileStatusRecurse(path, level, fs, FileUtils.HIDDEN_FILES_PATH_FILTER, false);
   }
 
-  public static FileStatus[] getFileStatusRecurse(
+  public static List<FileStatus> getFileStatusRecurse(
       Path path, int level, FileSystem fs, PathFilter filter) throws IOException {
     return getFileStatusRecurse(path, level, fs, filter, false);
   }
 
-  public static FileStatus[] getFileStatusRecurse(
+  public static List<FileStatus> getFileStatusRecurse(
       Path path, int level, FileSystem fs, PathFilter filter, boolean allLevelsBelow)
           throws IOException {
 
@@ -79,9 +79,9 @@ public class HiveStatsUtils {
         // does not exist. But getFileStatus() throw IOException. To mimic the
         // similar behavior we will return empty array on exception. For external
         // tables, the path of the table will not exists during table creation
-        return new FileStatus[0];
+        return new ArrayList<>(0);
       }
-      return result.toArray(new FileStatus[result.size()]);
+      return result;
     }
 
     // construct a path pattern (e.g., /*/*) to find all dynamically generated paths
@@ -91,7 +91,7 @@ public class HiveStatsUtils {
     }
     Path pathPattern = new Path(path, sb.toString());
     if (!allLevelsBelow) {
-      return fs.globStatus(pathPattern, filter);
+      return Lists.newArrayList(fs.globStatus(pathPattern, filter));
     }
     LinkedList<FileStatus> queue = new LinkedList<>();
     List<FileStatus> results = new ArrayList<FileStatus>();
@@ -114,7 +114,7 @@ public class HiveStatsUtils {
         }
       }
     }
-    return results.toArray(new FileStatus[results.size()]);
+    return results;
   }
 
   public static int getNumBitVectorsForNDVEstimation(Configuration conf) throws Exception {
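
The array-to-List change ripples through every caller of getFileStatusRecurse; most now iterate the result directly, and the few that still need a FileStatus[] bridge via toArray, as Utilities.java does below. A small illustrative sketch (the class and method names are made up):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.HiveStatsUtils;

    class RecurseSketch {
      static long totalLen(Path path, FileSystem fs) throws IOException {
        // Common case: iterate the returned List directly.
        List<FileStatus> statuses = HiveStatsUtils.getFileStatusRecurse(path, 1, fs);
        long total = 0;
        for (FileStatus s : statuses) {
          total += s.getLen();
        }
        // Bridge only where an array-taking API remains in place.
        FileStatus[] arr = statuses.toArray(new FileStatus[statuses.size()]);
        assert arr.length == statuses.size();
        return total;
      }
    }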

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fb926eb..04b8c4b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -575,6 +575,8 @@ public class HiveConf extends Configuration {
 
     HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
     HIVE_IN_TEST_SSL("hive.in.ssl.test", false, "internal usage only, true in SSL test mode", true),
+    // TODO: this needs to be removed; see TestReplicationScenarios* comments.
+    HIVE_IN_TEST_REPL("hive.in.repl.test", false, "internal usage only, true in replication test mode", true),
     HIVE_IN_TEST_IDE("hive.in.ide.test", false, "internal usage only, true if test running in ide",
         true),
     HIVE_TESTING_SHORT_LOGS("hive.testing.short.logs", false,

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 41c89b1..c383a53 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
@@ -72,12 +73,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
@@ -146,6 +149,7 @@ public class TestReplicationScenarios {
     hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
     hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+    hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
     hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
         "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
@@ -2774,7 +2778,7 @@ public class TestReplicationScenarios {
   }
 
   @Test
-  public void testIncrementalRepeatEventOnMissingObject() throws IOException {
+  public void testIncrementalRepeatEventOnMissingObject() throws Exception {
     String testName = "incrementalRepeatEventOnMissingObject";
     String dbName = createDB(testName, driver);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver);
@@ -3211,10 +3215,13 @@ public class TestReplicationScenarios {
   }
 
   @Test
-  public void testSkipTables() throws IOException {
+  public void testSkipTables() throws Exception {
     String testName = "skipTables";
     String dbName = createDB(testName, driver);
 
+    // TODO: this is wrong; this test sets up a dummy txn manager, so it cannot create ACID tables.
+    //       If I change it to use the proper txn manager, the setup for some tests hangs.
+    //       This used to work by accident; now it works due to a test flag. The test needs to be fixed.
     // Create table
     run("CREATE TABLE " + dbName + ".acid_table (key int, value int) PARTITIONED BY (load_date date) " +
         "CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
@@ -3651,14 +3658,10 @@ public class TestReplicationScenarios {
     assertEquals(NoSuchObjectException.class, e.getClass());
   }
 
-  private void verifyIfTableExist(String dbName, String tableName, HiveMetaStoreClient myClient){
-    Exception e = null;
-    try {
-      Table tbl = myClient.getTable(dbName, tableName);
-      assertNotNull(tbl);
-    } catch (TException te) {
-      assert(false);
-    }
+  private void verifyIfTableExist(
+      String dbName, String tableName, HiveMetaStoreClient myClient) throws Exception {
+    Table tbl = myClient.getTable(dbName, tableName);
+    assertNotNull(tbl);
   }
 
   private void verifyIfPartitionNotExist(String dbName, String tableName, List<String> partValues,

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 6e8d6b6..13b918d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -73,6 +73,7 @@ public class TestReplicationScenariosAcrossInstances {
         new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
     HashMap<String, String> overridesForHiveConf = new HashMap<String, String>() {{
       put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
+      put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
     }};
     primary = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf);
     replica = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf);
@@ -398,6 +399,9 @@ public class TestReplicationScenariosAcrossInstances {
         .run("create table t1 (i int, j int)")
         .run("create database " + dbOne)
         .run("use " + dbOne)
+    // TODO: this is wrong; this test sets up a dummy txn manager, so it cannot create ACID tables.
+    //       This used to work by accident; now it works due to a test flag. The test needs to be fixed.
+    //       The same applies to a couple more tests.
         .run("create table t1 (i int, j int) partitioned by (load_date date) "
             + "clustered by(i) into 2 buckets stored as orc tblproperties ('transactional'='true') ")
         .run("create database " + dbTwo)

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 9ea7a7c..f53afaf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -36,6 +36,7 @@ import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
@@ -60,6 +61,8 @@ import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TJSONProtocol;
 import org.apache.thrift.transport.TMemoryBuffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * QueryPlan can be serialized to disk so that we can restart/resume the
@@ -149,6 +152,7 @@ public class QueryPlan implements Serializable {
     this.acidResourcesInQuery = sem.hasTransactionalInQuery();
     this.acidSinks = sem.getAcidFileSinks();
   }
+  private static final Logger LOG = LoggerFactory.getLogger(QueryPlan.class);
 
   /**
    * @return true if any acid resources are read/written

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
index eee5e66..ce683c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
@@ -62,7 +62,7 @@ public class CopyTask extends Task<CopyWork> implements Serializable {
   protected int copyOnePath(Path fromPath, Path toPath) {
     FileSystem dstFs = null;
     try {
-      Utilities.FILE_OP_LOGGER./**/debug("Copying data from {} to {} " + fromPath);
+      Utilities.FILE_OP_LOGGER.trace("Copying data from {} to {} " + fromPath);
       console.printInfo("Copying data from " + fromPath.toString(), " to "
           + toPath.toString());
 

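One nit the CopyTask hunk leaves behind: the format string still carries two {} placeholders while fromPath is concatenated on and toPath is never passed, so the rendered message ends with a literal "{} to {}". Assuming FILE_OP_LOGGER is an org.slf4j.Logger, the fully parameterized form would be:

    Utilities.FILE_OP_LOGGER.trace("Copying data from {} to {}", fromPath, toPath);
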
http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index b490325..7eba5e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -687,7 +687,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
    * has done it's job before the query ran.
    */
   WriteEntity.WriteType getWriteType(LoadTableDesc tbd, AcidUtils.Operation operation) {
-    if (tbd.getLoadFileType() == LoadFileType.REPLACE_ALL) {
+    if (tbd.getLoadFileType() == LoadFileType.REPLACE_ALL || tbd.isInsertOverwrite()) {
       return WriteEntity.WriteType.INSERT_OVERWRITE;
     }
     switch (operation) {
@@ -730,13 +730,13 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
       //       have the correct buckets. The existing code discards the inferred data when the
       //       reducers don't produce enough files; we'll do the same for MM tables for now.
       FileSystem fileSys = partn.getDataLocation().getFileSystem(conf);
-      FileStatus[] fileStatus = HiveStatsUtils.getFileStatusRecurse(
+      List<FileStatus> fileStatus = HiveStatsUtils.getFileStatusRecurse(
           partn.getDataLocation(), 1, fileSys);
       // Verify the number of buckets equals the number of files
       // This will not hold for dynamic partitions where not every reducer produced a file for
       // those partitions.  In this case the table is not bucketed as Hive requires a files for
       // each bucket.
-      if (fileStatus.length == numBuckets) {
+      if (fileStatus.size() == numBuckets) {
         List<String> newBucketCols = new ArrayList<String>();
         updateBucketCols = true;
         for (BucketCol bucketCol : bucketCols) {

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 804cd78..5fbe045 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -1493,8 +1493,9 @@ public final class Utilities {
       }
 
       // Remove duplicates from tmpPath
-      FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(
+      List<FileStatus> statusList = HiveStatsUtils.getFileStatusRecurse(
           tmpPath, ((dpCtx == null) ? 1 : dpCtx.getNumDPCols()), fs);
+      FileStatus[] statuses = statusList.toArray(new FileStatus[statusList.size()]);
       if(statuses != null && statuses.length > 0) {
         PerfLogger perfLogger = SessionState.getPerfLogger();
         Set<Path> filesKept = new HashSet<Path>();
@@ -1601,8 +1602,9 @@ public final class Utilities {
     if (path  == null) {
       return null;
     }
-    FileStatus[] stats = HiveStatsUtils.getFileStatusRecurse(path,
+    List<FileStatus> statusList = HiveStatsUtils.getFileStatusRecurse(path,
         ((dpCtx == null) ? 1 : dpCtx.getNumDPCols()), fs);
+    FileStatus[] stats = statusList.toArray(new FileStatus[statusList.size()]);
     return removeTempOrDuplicateFiles(fs, stats, dpCtx, conf, hconf, isBaseDir);
   }
 
@@ -2675,9 +2677,9 @@ public final class Utilities {
       Path loadPath = dpCtx.getRootPath();
       FileSystem fs = loadPath.getFileSystem(conf);
       int numDPCols = dpCtx.getNumDPCols();
-      FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(loadPath, numDPCols, fs);
+      List<FileStatus> status = HiveStatsUtils.getFileStatusRecurse(loadPath, numDPCols, fs);
 
-      if (status.length == 0) {
+      if (status.isEmpty()) {
         LOG.warn("No partition is generated by dynamic partitioning");
         return null;
       }
@@ -2690,9 +2692,9 @@ public final class Utilities {
 
       // for each dynamically created DP directory, construct a full partition spec
       // and load the partition based on that
-      for (int i = 0; i < status.length; ++i) {
+      for (int i = 0; i < status.size(); ++i) {
         // get the dynamically created directory
-        Path partPath = status[i].getPath();
+        Path partPath = status.get(i).getPath();
         assert fs.getFileStatus(partPath).isDir() : "partitions " + partPath
             + " is not a directory !";
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index 0a82225..eb9b1e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -240,6 +240,8 @@ public class LoadPartitions {
    */
   private Task<?> movePartitionTask(Table table, AddPartitionDesc.OnePartitionDesc partSpec,
       Path tmpPath) {
+    // Note: this sets LoadFileType incorrectly for ACID; is that relevant for load?
+    //       See setLoadFileType and setIsAcidIow calls elsewhere for an example.
     LoadTableDesc loadTableWork = new LoadTableDesc(
         tmpPath, Utilities.getTableDesc(table), partSpec.getPartSpec(),
         event.replicationSpec().isReplace() ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING,

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index ced84b3..1828f0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.orc.OrcFile;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
@@ -45,6 +47,7 @@ import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
 import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hive.common.util.Ref;
 import org.apache.orc.FileFormatException;
 import org.apache.orc.impl.OrcAcidUtils;
@@ -1034,6 +1037,7 @@ public class AcidUtils {
       return o1.getFileStatus().compareTo(o2.getFileStatus());
     });
 
+    // Note: isRawFormat is invalid for non-ORC tables. It will always return true, so we're good.
     final boolean isBaseInRawFormat = base != null && MetaDataFile.isRawFormat(base, fs);
     return new Directory() {
 
@@ -1098,7 +1102,13 @@ public class AcidUtils {
       FileSystem fs) throws IOException {
     Path p = child.getPath();
     String fn = p.getName();
-    if (fn.startsWith(BASE_PREFIX) && child.isDir()) {
+    if (!child.isDirectory()) {
+      if (!ignoreEmptyFiles || child.getLen() != 0) {
+        original.add(createOriginalObj(childWithId, child));
+      }
+      return;
+    }
+    if (fn.startsWith(BASE_PREFIX)) {
       long writeId = parseBase(p);
       if(bestBase.oldestBaseWriteId > writeId) {
         //keep track for error reporting
@@ -1119,28 +1129,25 @@ public class AcidUtils {
       } else {
         obsolete.add(child);
       }
-    } else if ((fn.startsWith(DELTA_PREFIX) || fn.startsWith(DELETE_DELTA_PREFIX))
-                    && child.isDir()) {
-      String deltaPrefix =
-              (fn.startsWith(DELTA_PREFIX)) ? DELTA_PREFIX : DELETE_DELTA_PREFIX;
+    } else if (fn.startsWith(DELTA_PREFIX) || fn.startsWith(DELETE_DELTA_PREFIX)) {
+      String deltaPrefix = fn.startsWith(DELTA_PREFIX)  ? DELTA_PREFIX : DELETE_DELTA_PREFIX;
       ParsedDelta delta = parseDelta(child, deltaPrefix, fs);
-      if (tblproperties != null && AcidUtils.isInsertOnlyTable(tblproperties) &&
-        ValidWriteIdList.RangeResponse.ALL == writeIdList.isWriteIdRangeAborted(delta.minWriteId, delta.maxWriteId)) {
+      // Handle aborted deltas. Currently this can only happen for MM tables.
+      if (tblproperties != null && isTransactionalTable(tblproperties) &&
+        ValidWriteIdList.RangeResponse.ALL == writeIdList.isWriteIdRangeAborted(
+            delta.minWriteId, delta.maxWriteId)) {
         aborted.add(child);
       }
-      if (writeIdList.isWriteIdRangeValid(delta.minWriteId,
-          delta.maxWriteId) !=
-              ValidWriteIdList.RangeResponse.NONE) {
+      if (writeIdList.isWriteIdRangeValid(
+          delta.minWriteId, delta.maxWriteId) != ValidWriteIdList.RangeResponse.NONE) {
         working.add(delta);
       }
-    } else if (child.isDir()) {
+    } else {
       // This is just the directory.  We need to recurse and find the actual files.  But don't
       // do this until we have determined there is no base.  This saves time.  Plus,
       // it is possible that the cleaner is running and removing these original files,
       // in which case recursing through them could cause us to get an error.
       originalDirectories.add(child);
-    } else if (!ignoreEmptyFiles || child.getLen() != 0){
-      original.add(createOriginalObj(childWithId, child));
     }
   }
 
@@ -1252,9 +1259,13 @@ public class AcidUtils {
     if (table == null || table.getTblProps() == null) {
       return false;
     }
-    String tableIsTransactional = table.getTblProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+    return isTransactionalTable(table.getTblProps());
+  }
+
+  public static boolean isTransactionalTable(Map<String, String> props) {
+    String tableIsTransactional = props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
     if (tableIsTransactional == null) {
-      tableIsTransactional = table.getTblProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
+      tableIsTransactional = props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
     }
     return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
   }
@@ -1744,4 +1755,40 @@ public class AcidUtils {
       }
     }
   }
+
+  public static List<FileStatus> getAcidFilesForStats(
+      Table table, Path dir, Configuration jc, FileSystem fs) throws IOException {
+    List<FileStatus> fileList = new ArrayList<>();
+    ValidWriteIdList idList = AcidUtils.getTableValidWriteIdList(jc,
+        AcidUtils.getFullTableName(table.getDbName(), table.getTableName()));
+    if (idList == null) {
+      LOG.warn("Cannot get ACID state for " + table.getDbName() + "." + table.getTableName()
+          + " from " + jc.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
+      return null;
+    }
+    Directory acidInfo = AcidUtils.getAcidState(dir, jc, idList);
+    // Assume that for an MM table, or when only the base directory is present, we are good.
+    if (!acidInfo.getCurrentDirectories().isEmpty() && AcidUtils.isFullAcidTable(table)) {
+      Utilities.FILE_OP_LOGGER.warn(
+          "Computing stats for an ACID table; stats may be inaccurate");
+    }
+    if (fs == null) {
+      fs = dir.getFileSystem(jc);
+    }
+    for (HdfsFileStatusWithId hfs : acidInfo.getOriginalFiles()) {
+      fileList.add(hfs.getFileStatus());
+    }
+    for (ParsedDelta delta : acidInfo.getCurrentDirectories()) {
+      for (FileStatus f : HiveStatsUtils.getFileStatusRecurse(delta.getPath(), -1, fs)) {
+        fileList.add(f);
+      }
+    }
+    if (acidInfo.getBaseDirectory() != null) {
+      for (FileStatus f : HiveStatsUtils.getFileStatusRecurse(
+          acidInfo.getBaseDirectory(), -1, fs)) {
+        fileList.add(f);
+      }
+    }
+    return fileList;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
index 1a63d3f..07abd37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
@@ -161,15 +161,14 @@ public class MergeFileWork extends MapWork {
       Path dirPath = inputPaths.get(0);
       try {
         FileSystem inpFs = dirPath.getFileSystem(conf);
-        FileStatus[] status =
-            HiveStatsUtils.getFileStatusRecurse(dirPath, listBucketingCtx
-                .getSkewedColNames().size(), inpFs);
+        List<FileStatus> status = HiveStatsUtils.getFileStatusRecurse(
+            dirPath, listBucketingCtx.getSkewedColNames().size(), inpFs);
         List<Path> newInputPath = new ArrayList<Path>();
         boolean succeed = true;
-        for (int i = 0; i < status.length; ++i) {
-          if (status[i].isDir()) {
+        for (FileStatus s : status) {
+          if (s.isDir()) {
             // Add the lb path to the list of input paths
-            newInputPath.add(status[i].getPath());
+            newInputPath.add(s.getPath());
           } else {
             // find file instead of dir. dont change inputpath
             succeed = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c0be51e..6b635fc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1575,10 +1575,9 @@ public class Hive {
       Path newPartPath = null;
 
       if (inheritTableSpecs) {
-        Path partPath = new Path(tbl.getDataLocation(),
-            Warehouse.makePartPath(partSpec));
-        newPartPath = new Path(tblDataLocationPath.toUri().getScheme(), tblDataLocationPath.toUri().getAuthority(),
-            partPath.toUri().getPath());
+        Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec));
+        newPartPath = new Path(tblDataLocationPath.toUri().getScheme(),
+            tblDataLocationPath.toUri().getAuthority(), partPath.toUri().getPath());
 
         if(oldPart != null) {
           /*
@@ -1606,6 +1605,12 @@ public class Hive {
       if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && (null != oldPart)) {
         newFiles = Collections.synchronizedList(new ArrayList<Path>());
       }
+
+
+      // Note: the stats for ACID tables are not coordinated with either the Hive ACID logic
+      //       (txn commits, timeouts, etc.) or the lower-level sync in the metastore pertaining
+      //       to ACID updates. So they are not themselves ACID.
+
       // Note: this assumes both paths are qualified; which they are, currently.
       if (isMmTableWrite && loadPath.equals(newPartPath)) {
         // MM insert query, move itself is a no-op.
@@ -1626,7 +1631,9 @@ public class Hive {
         Path destPath = newPartPath;
         if (isMmTableWrite) {
           // We will load into MM directory, and delete from the parent if needed.
+          // TODO: this looks invalid after ACID integration. What about base dirs?
           destPath = new Path(destPath, AcidUtils.deltaSubdir(writeId, writeId, stmtId));
+          // TODO: loadFileType for MM table will no longer be REPLACE_ALL
           filter = (loadFileType == LoadFileType.REPLACE_ALL)
             ? new JavaUtils.IdPathFilter(writeId, stmtId, false, true) : filter;
         }
@@ -1641,6 +1648,7 @@ public class Hive {
           //for fullAcid tables we don't delete files for commands with OVERWRITE - we create a new
           // base_x.  (there is Insert Overwrite and Load Data Overwrite)
           boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
+          // TODO: this should never run for MM tables anymore. Remove the flag, and maybe the filter?
           replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
               isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite, !tbl.isTemporary());
         } else {
@@ -1689,7 +1697,21 @@ public class Hive {
           StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(),
               MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
         }
-        MetaStoreUtils.populateQuickStats(HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)), newTPart.getParameters());
+        // Note: we are creating a brand new partition, so this is going to be valid for ACID.
+        List<FileStatus> filesForStats = null;
+        if (isFullAcidTable || isMmTableWrite) {
+          filesForStats = AcidUtils.getAcidFilesForStats(
+              newTPart.getTable(), newPartPath, conf, null);
+        } else {
+          filesForStats = HiveStatsUtils.getFileStatusRecurse(
+              newPartPath, -1, newPartPath.getFileSystem(conf));
+        }
+        if (filesForStats != null) {
+          MetaStoreUtils.populateQuickStats(filesForStats, newTPart.getParameters());
+        } else {
+          // The ACID state is probably absent. Warning is logged in the get method.
+          MetaStoreUtils.clearQuickStats(newTPart.getParameters());
+        }
         try {
           LOG.debug("Adding new partition " + newTPart.getSpec());
           getSynchronizedMSC().add_partition(newTPart.getTPartition());
@@ -1946,7 +1968,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     try {
       FileSystem fs = loadPath.getFileSystem(conf);
       if (!isMmTable) {
-        FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs);
+        List<FileStatus> leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs);
         // Check for empty partitions
         for (FileStatus s : leafStatus) {
           if (!s.isDirectory()) {
@@ -2168,9 +2190,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       newFiles = Collections.synchronizedList(new ArrayList<Path>());
     }
+
     // Note: this assumes both paths are qualified; which they are, currently.
     if (isMmTable && loadPath.equals(tbl.getPath())) {
-      Utilities.FILE_OP_LOGGER.debug("not moving " + loadPath + " to " + tbl.getPath());
+      if (Utilities.FILE_OP_LOGGER.isDebugEnabled()) {
+        Utilities.FILE_OP_LOGGER.debug(
+            "not moving " + loadPath + " to " + tbl.getPath() + " (MM)");
+      }
       newFiles = listFilesCreatedByQuery(loadPath, writeId, stmtId);
     } else {
       // Either a non-MM query, or a load into MM table from an external source.
@@ -2180,7 +2206,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
       if (isMmTable) {
         assert !isAcidIUDoperation;
         // We will load into MM directory, and delete from the parent if needed.
+        // TODO: this looks invalid after ACID integration. What about base dirs?
         destPath = new Path(destPath, AcidUtils.deltaSubdir(writeId, writeId, stmtId));
+        // TODO: loadFileType for MM table will no longer be REPLACE_ALL
         filter = loadFileType == LoadFileType.REPLACE_ALL
             ? new JavaUtils.IdPathFilter(writeId, stmtId, false, true) : filter;
       }
@@ -2193,6 +2221,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         //for fullAcid we don't want to delete any files even for OVERWRITE see HIVE-14988/HIVE-17361
         //todo:  should probably do the same for MM IOW
         boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
+        // TODO: this should never run for MM tables anymore. Remove the flag, and maybe the filter?
         replaceFiles(tblPath, loadPath, destPath, tblPath,
             sessionConf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
       } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 1c6b793..b7fbea4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -520,6 +520,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
       Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(),
           x.getOutputs(), addPartitionDesc), x.getConf());
+      // Note: this sets LoadFileType incorrectly for ACID; is that relevant for import?
+      //       See setLoadFileType and setIsAcidIow calls elsewhere for an example.
       LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table),
           partSpec.getPartSpec(),
           replicationSpec.isReplace() ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING,

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 7d2de75..fb3bfda 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -329,8 +329,9 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
       stmtId = SessionState.get().getTxnMgr().getStmtIdAndIncrement();
     }
 
-    LoadTableDesc loadTableWork;
-    loadTableWork = new LoadTableDesc(new Path(fromURI),
+    // Note: this sets LoadFileType incorrectly for ACID; is that relevant for load?
+    //       See setLoadFileType and setIsAcidIow calls elsewhere for an example.
+    LoadTableDesc loadTableWork = new LoadTableDesc(new Path(fromURI),
       Utilities.getTableDesc(ts.tableHandle), partSpec,
       isOverWrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING, writeId);
     loadTableWork.setStmtId(stmtId);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d99df80..d1609e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6953,10 +6953,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, writeId);
         // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
         // deltas and base and leave them up to the cleaner to clean up
-        LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-            dest_tab.getTableName()) && !destTableIsTransactional)
+        boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(
+            dest_tab.getDbName(), dest_tab.getTableName());
+        LoadFileType loadType = (!isInsertInto && !destTableIsTransactional)
             ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
         ltd.setLoadFileType(loadType);
+        ltd.setInsertOverwrite(!isInsertInto);
         ltd.setLbCtx(lbCtx);
         loadTableWork.add(ltd);
       } else {
@@ -7042,10 +7044,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, writeId);
       // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
       // deltas and base and leave them up to the cleaner to clean up
-      LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-          dest_tab.getTableName()) && !destTableIsTransactional) // // Both Full-acid and MM tables are excluded.
+      boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(
+          dest_tab.getDbName(), dest_tab.getTableName());
+      LoadFileType loadType = (!isInsertInto && !destTableIsTransactional)
           ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
       ltd.setLoadFileType(loadType);
+      ltd.setInsertOverwrite(!isInsertInto);
       ltd.setLbCtx(lbCtx);
 
       loadTableWork.add(ltd);
@@ -7055,7 +7059,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
             .getMsg(dest_tab.getTableName() + "@" + dest_part.getName()));
       }
-      break;
+     break;
     }
     case QBMetaData.DEST_LOCAL_FILE:
       isLocal = true;
@@ -12323,7 +12327,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     for (WriteEntity writeEntity : getOutputs()) {
       WriteEntity.Type type = writeEntity.getType();
 
-
       if (type == WriteEntity.Type.PARTITION || type == WriteEntity.Type.DUMMYPARTITION) {
         String conflictingArchive = null;
         try {
@@ -12697,8 +12700,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
     }
 
-    addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE);
-
     if (isTemporary) {
       if (partCols.size() > 0) {
         throw new SemanticException("Partition columns are not supported on temporary tables");
@@ -12720,11 +12721,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     // Handle different types of CREATE TABLE command
+    // Note: each branch must call addDbAndTabToOutputs after finalizing table properties.
+
     switch (command_type) {
 
     case CREATE_TABLE: // REGULAR CREATE TABLE DDL
       tblProps = addDefaultProperties(
           tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
+      addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
 
       CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols,
           bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
@@ -12747,6 +12751,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     case CTLT: // create table like <tbl_name>
       tblProps = addDefaultProperties(
           tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
+      addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
 
       if (isTemporary) {
         Table likeTable = getTable(likeTableName, false);
@@ -12825,6 +12830,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
       tblProps = addDefaultProperties(
           tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
+      addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
       tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols,
           partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
           rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim,
@@ -12846,11 +12852,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     return null;
   }
 
-  private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type) throws SemanticException {
+  /** Adds entities for create table/create view. */
+  private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type,
+      Map<String, String> tblProps) throws SemanticException {
     Database database  = getDatabase(qualifiedTabName[0]);
     outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
 
     Table t = new Table(qualifiedTabName[0], qualifiedTabName[1]);
+    t.setParameters(tblProps);
     t.setTableType(type);
     outputs.add(new WriteEntity(t, WriteEntity.WriteType.DDL_NO_LOCK));
   }
@@ -12952,7 +12961,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           storageFormat.getInputFormat(), storageFormat.getOutputFormat(),
           location, storageFormat.getSerde(), storageFormat.getStorageHandler(),
           storageFormat.getSerdeProps());
-      addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW);
+      addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW, tblProps);
       queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW);
     } else {
       createVwDesc = new CreateViewDesc(
@@ -12961,7 +12970,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           storageFormat.getOutputFormat(), storageFormat.getSerde());
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
           createVwDesc), conf));
-      addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW);
+      addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW, tblProps);
       queryState.setCommandType(HiveOperation.CREATEVIEW);
     }
     qb.setViewDesc(createVwDesc);
@@ -14074,7 +14083,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   // Make sure the proper transaction manager that supports ACID is being used
   protected void checkAcidTxnManager(Table table) throws SemanticException {
-    if (SessionState.get() != null && !getTxnMgr().supportsAcid()) {
+    if (SessionState.get() != null && !getTxnMgr().supportsAcid()
+        && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST_REPL)) {
       throw new SemanticException(ErrorMsg.TXNMGR_NOT_ACID, table.getDbName(), table.getTableName());
     }
   }
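
A note on the addDbAndTabToOutputs change above: attaching the finalized table
properties to the WriteEntity lets consumers of the outputs set (for example,
pre-execution hooks and authorization) inspect properties such as
'transactional' for a table that does not yet exist in the metastore. The
pattern, condensed from the hunk (a hedged restatement, not new API):

    // Same ql-layer Table/WriteEntity types the patch touches.
    Table t = new Table(qualifiedTabName[0], qualifiedTabName[1]);
    t.setParameters(tblProps);      // props finalized by addDefaultProperties
    t.setTableType(TableType.MANAGED_TABLE);
    outputs.add(new WriteEntity(t, WriteEntity.WriteType.DDL_NO_LOCK));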

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
index a4e770c..55d05a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
@@ -172,9 +172,11 @@ public class BasicStatsWork implements Serializable {
       return true;
     }
     // INSERT OVERWRITE
-    if (getLoadTableDesc() != null && getLoadTableDesc().getLoadFileType() == LoadFileType.REPLACE_ALL) {
+    LoadTableDesc ltd = getLoadTableDesc();
+    if (ltd != null && (ltd.getLoadFileType() == LoadFileType.REPLACE_ALL || ltd.isInsertOverwrite())) {
       return true;
     }
+
     // CREATE TABLE ... AS
     if (getLoadFileDesc() != null && getLoadFileDesc().getCtasCreateTableDesc() != null) {
       return true;
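
Because ACID INSERT OVERWRITE now loads with KEEP_EXISTING (old deltas stay on
disk for the cleaner), REPLACE_ALL alone no longer identifies every overwrite;
the new isInsertOverwrite flag closes that gap. The check, condensed (a hedged
restatement of the hunk above):

    LoadTableDesc ltd = getLoadTableDesc();
    boolean targetRewritten = ltd != null
        && (ltd.getLoadFileType() == LoadFileType.REPLACE_ALL  // non-ACID IOW
            || ltd.isInsertOverwrite());                       // ACID IOW: files kept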

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
index 8ce0cb0..80f77b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
@@ -231,7 +231,8 @@ public class ConditionalResolverMergeFiles implements ConditionalResolver,
       throws IOException {
     DynamicPartitionCtx dpCtx = ctx.getDPCtx();
     // get list of dynamic partitions
-    FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(dirPath, dpLbLevel, inpFs);
+    List<FileStatus> statusList = HiveStatsUtils.getFileStatusRecurse(dirPath, dpLbLevel, inpFs);
+    FileStatus[] status = statusList.toArray(new FileStatus[statusList.size()]);
 
     // cleanup pathToPartitionInfo
     Map<Path, PartitionDesc> ptpi = work.getPathToPartitionInfo();

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
index 946c300..d4d46a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -161,11 +163,18 @@ public class BasicStatsNoJobTask implements IStatsProcessor {
         long rawDataSize = 0;
         long fileSize = 0;
         long numFiles = 0;
-        LOG.debug("Aggregating stats for {}", dir);
-        FileStatus[] fileList = HiveStatsUtils.getFileStatusRecurse(dir, -1, fs);
+        // Note: plain recursive listing is invalid for transactional tables of any kind.
+        Utilities.FILE_OP_LOGGER.debug("Aggregating stats for {}", dir);
+        List<FileStatus> fileList = null;
+        if (partish.getTable() != null
+            && AcidUtils.isTransactionalTable(partish.getTable())) {
+          fileList = AcidUtils.getAcidFilesForStats(partish.getTable(), dir, jc, fs);
+        } else {
+          fileList = HiveStatsUtils.getFileStatusRecurse(dir, -1, fs);
+        }
 
         for (FileStatus file : fileList) {
-          LOG.debug("Computing stats for {}", file);
+          Utilities.FILE_OP_LOGGER.debug("Computing stats for {}", file);
           if (!file.isDirectory()) {
             InputFormat<?, ?> inputFormat = ReflectionUtil.newInstance(partish.getInputFormatClass(), jc);
             InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new String[] { partish.getLocation() });
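
The listing choice above matters because a transactional table's directory
mixes base_N and delta_x_y subdirectories from many write IDs, including
uncommitted or aborted ones; only the ACID-aware listing returns files that
belong to the current table state. Condensed (a hedged restatement using the
same calls as the hunk):

    List<FileStatus> fileList = AcidUtils.isTransactionalTable(partish.getTable())
        ? AcidUtils.getAcidFilesForStats(partish.getTable(), dir, jc, fs)
        : HiveStatsUtils.getFileStatusRecurse(dir, -1, fs);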

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
index 1d7660e..8c23887 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hive.ql.stats;
 
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -111,7 +113,8 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
   private static class BasicStatsProcessor {
 
     private Partish partish;
-    private FileStatus[] partfileStatus;
+    private List<FileStatus> partfileStatus;
+    private boolean isMissingAcidState = false;
     private BasicStatsWork work;
     private boolean followedColStats1;
 
@@ -124,11 +127,10 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
     public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException {
       Partish p = partish;
       Map<String, String> parameters = p.getPartParameters();
-      if (p.isAcid()) {
+      if (p.isTransactionalTable()) {
+        // TODO: this should also happen on any error. Right now this task will just fail.
         StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
-      }
-
-      if (work.isTargetRewritten()) {
+      } else if (work.isTargetRewritten()) {
         StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
       }
 
@@ -140,8 +142,15 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
         StatsSetupConst.clearColumnStatsState(parameters);
       }
 
-      if(partfileStatus == null){
-        LOG.warn("Partition/partfiles is null for: " + partish.getPartition().getSpec());
+      if (partfileStatus == null) {
+        // This may happen if ACID state is absent from config.
+        String spec = partish.getPartition() == null ? partish.getTable().getTableName()
+            : partish.getPartition().getSpec().toString();
+        LOG.warn("Partition/partfiles is null for: " + spec);
+        if (isMissingAcidState) {
+          MetaStoreUtils.clearQuickStats(parameters);
+          return p.getOutput();
+        }
         return null;
       }
 
@@ -153,23 +162,28 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
         StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
       }
 
-      updateQuickStats(parameters, partfileStatus);
-      if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
-        if (statsAggregator != null) {
+      MetaStoreUtils.populateQuickStats(partfileStatus, parameters);
+
+      if (statsAggregator != null) {
+        // Update stats for transactional tables (MM, or full ACID with overwrite), even
+        // though we are marking stats as not being accurate.
+        if (StatsSetupConst.areBasicStatsUptoDate(parameters) || p.isTransactionalTable()) {
           String prefix = getAggregationPrefix(p.getTable(), p.getPartition());
-          updateStats(statsAggregator, parameters, prefix);
+          updateStats(statsAggregator, parameters, prefix, p.isAcid());
         }
       }
 
       return p.getOutput();
     }
 
-    public void collectFileStatus(Warehouse wh) throws MetaException {
-      partfileStatus = wh.getFileStatusesForSD(partish.getPartSd());
-    }
-
-    private void updateQuickStats(Map<String, String> parameters, FileStatus[] partfileStatus) throws MetaException {
-      MetaStoreUtils.populateQuickStats(partfileStatus, parameters);
+    public void collectFileStatus(Warehouse wh, HiveConf conf) throws MetaException, IOException {
+      if (!partish.isTransactionalTable()) {
+        partfileStatus = wh.getFileStatusesForSD(partish.getPartSd());
+      } else {
+        Path path = new Path(partish.getPartSd().getLocation());
+        partfileStatus = AcidUtils.getAcidFilesForStats(partish.getTable(), path, conf, null);
+        isMissingAcidState = true;
+      }
     }
 
     private String getAggregationPrefix(Table table, Partition partition) throws MetaException {
@@ -191,9 +205,15 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
       return prefix;
     }
 
-    private void updateStats(StatsAggregator statsAggregator, Map<String, String> parameters, String aggKey) throws HiveException {
-
+    private void updateStats(StatsAggregator statsAggregator, Map<String, String> parameters,
+        String aggKey, boolean isFullAcid) throws HiveException {
       for (String statType : StatsSetupConst.statsRequireCompute) {
+        if (isFullAcid && !work.isTargetRewritten()) {
+          // Don't bother with aggregation in this case; it would probably be invalid.
+          parameters.remove(statType);
+          continue;
+        }
+
         String value = statsAggregator.aggregateStats(aggKey, statType);
         if (value != null && !value.isEmpty()) {
           long longValue = Long.parseLong(value);
@@ -247,7 +267,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
         partishes.add(p = new Partish.PTable(table));
 
         BasicStatsProcessor basicStatsProcessor = new BasicStatsProcessor(p, work, conf, followedColStats);
-        basicStatsProcessor.collectFileStatus(wh);
+        basicStatsProcessor.collectFileStatus(wh, conf);
         Table res = (Table) basicStatsProcessor.process(statsAggregator);
         if (res == null) {
           return 0;
@@ -280,7 +300,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
             futures.add(pool.submit(new Callable<Void>() {
               @Override
               public Void call() throws Exception {
-                bsp.collectFileStatus(wh);
+                bsp.collectFileStatus(wh, conf);
                 return null;
               }
             }));
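
Taken together, the BasicStatsTask changes give transactional tables this
lifecycle: basic stats are flagged inaccurate up front, quick stats (numFiles,
totalSize) are recomputed from the ACID-aware listing, aggregated counters are
still merged for transactional writes, and for full ACID without a rewrite the
per-row counters are dropped rather than stored wrong. A condensed, hedged
restatement (simplified control flow, not the exact code):

    if (p.isTransactionalTable()) {
      StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
    } else if (work.isTargetRewritten()) {
      StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
    }
    MetaStoreUtils.populateQuickStats(partfileStatus, parameters); // numFiles, totalSize
    if (statsAggregator != null
        && (StatsSetupConst.areBasicStatsUptoDate(parameters) || p.isTransactionalTable())) {
      for (String statType : StatsSetupConst.statsRequireCompute) {
        if (p.isAcid() && !work.isTargetRewritten()) {
          parameters.remove(statType); // full ACID without rewrite: aggregate is unreliable
        } else {
          // merge the counter published by the writers for this key
        }
      }
    }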

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index 7591c06..d4cfd0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -184,4 +184,4 @@ public class ColStatsProcessor implements IStatsProcessor {
   public void setDpPartSpecs(Collection<Partition> dpPartSpecs) {
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
index 05b0474..47810e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
@@ -51,6 +51,10 @@ public abstract class Partish {
     return AcidUtils.isFullAcidTable(getTable());
   }
 
+  public final boolean isTransactionalTable() {
+    return AcidUtils.isTransactionalTable(getTable());
+  }
+
   public abstract Table getTable();
 
   public abstract Map<String, String> getPartParameters();
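
The new helper broadens the check: micromanaged (insert-only) tables are
transactional but not full ACID, while full ACID tables satisfy both
predicates. Illustrated (hedged; the variable names are for exposition only):

    boolean anyTxn   = partish.isTransactionalTable(); // MM or full ACID
    boolean fullAcid = partish.isAcid();               // full ACID only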

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
index d84cf13..6d2de0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
@@ -50,9 +50,7 @@ public class FSStatsAggregator implements StatsAggregator {
     List<String> statsDirs = scc.getStatsTmpDirs();
     assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
     Path statsDir = new Path(statsDirs.get(0));
-    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
-      Utilities.FILE_OP_LOGGER.trace("About to read stats from : " + statsDir);
-    }
+    Utilities.FILE_OP_LOGGER.trace("About to read stats from {}", statsDir);
     statsMap  = new HashMap<String, Map<String,String>>();
 
     try {
@@ -65,6 +63,7 @@ public class FSStatsAggregator implements StatsAggregator {
         }
       });
       for (FileStatus file : status) {
+        Utilities.FILE_OP_LOGGER.trace("About to read stats file {} ", file.getPath());
         Input in = new Input(fs.open(file.getPath()));
         Kryo kryo = SerializationUtilities.borrowKryo();
         try {
@@ -72,6 +71,7 @@ public class FSStatsAggregator implements StatsAggregator {
         } finally {
           SerializationUtilities.releaseKryo(kryo);
         }
+        Utilities.FILE_OP_LOGGER.trace("Read : {}", statsMap);
         statsList.add(statsMap);
         in.close();
       }
@@ -86,7 +86,7 @@ public class FSStatsAggregator implements StatsAggregator {
   @Override
   public String aggregateStats(String partID, String statType) {
     long counter = 0;
-    LOG.debug("Part ID: " + partID + "\t" + statType);
+    Utilities.FILE_OP_LOGGER.debug("Part ID: " + partID + "\t" + statType);
     for (Map<String,Map<String,String>> statsMap : statsList) {
       Map<String,String> partStat = statsMap.get(partID);
       if (null == partStat) { // not all partitions are scanned in all mappers, so this could be null.
@@ -98,7 +98,7 @@ public class FSStatsAggregator implements StatsAggregator {
       }
       counter += Long.parseLong(statVal);
     }
-    LOG.info("Read stats for : " + partID + "\t" + statType + "\t" + counter);
+    Utilities.FILE_OP_LOGGER.info("Read stats for : " + partID + "\t" + statType + "\t" + counter);
 
     return String.valueOf(counter);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
index 922cfc2..902b37f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
@@ -105,9 +105,7 @@ public class FSStatsPublisher implements StatsPublisher {
         statsFile = new Path(statsDir, StatsSetupConst.STATS_FILE_PREFIX
             + conf.getInt("mapred.task.partition", 0));
       }
-      if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
-        Utilities.FILE_OP_LOGGER.trace("About to create stats file for this task : " + statsFile);
-      }
+      Utilities.FILE_OP_LOGGER.trace("About to create stats file for this task : {}", statsFile);
       Output output = new Output(statsFile.getFileSystem(conf).create(statsFile,true));
       LOG.debug("Created file : " + statsFile);
       LOG.debug("Writing stats in it : " + statsMap);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_change_fileformat_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_change_fileformat_acid.q b/ql/src/test/queries/clientnegative/orc_change_fileformat_acid.q
index cc73616..e9a1a41 100644
--- a/ql/src/test/queries/clientnegative/orc_change_fileformat_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_change_fileformat_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 alter table src_orc set fileformat textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_change_serde_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_change_serde_acid.q b/ql/src/test/queries/clientnegative/orc_change_serde_acid.q
index 91a2be5..d5f208c 100644
--- a/ql/src/test/queries/clientnegative/orc_change_serde_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_change_serde_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 alter table src_orc set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_reorder_columns1_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_reorder_columns1_acid.q b/ql/src/test/queries/clientnegative/orc_reorder_columns1_acid.q
index 234e74b..0169784 100644
--- a/ql/src/test/queries/clientnegative/orc_reorder_columns1_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_reorder_columns1_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 alter table src_orc change key k tinyint first;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q b/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
index 57ab049..d42752e 100644
--- a/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 alter table src_orc change key k tinyint after val;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q b/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
index 9fe9209..91367fd 100644
--- a/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 alter table src_orc replace columns (k int);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q b/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
index 7b37757..a46c010 100644
--- a/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 
 -- Currently, string to int conversion is not supported because it isn't in the lossless

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q b/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
index e3cb819..59f7f45 100644
--- a/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 
 -- Currently, smallint to tinyint conversion is not supported because it isn't in the lossless

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q b/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
index 3a8c08a..1eda267 100644
--- a/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 
 -- Currently, string to int conversion is not supported because it isn't in the lossless

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q b/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
index 1d24b1d..b593b0f 100644
--- a/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 
 -- Currently, bigint to int conversion is not supported because it isn't in the lossless

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q b/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
index 83764e2..94832f6 100644
--- a/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
@@ -1,3 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
 SET hive.exec.schema.evolution=false;
 
 -- Currently, double to smallint conversion is not supported because it isn't in the lossless

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/queries/clientpositive/acid_nullscan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_nullscan.q b/ql/src/test/queries/clientpositive/acid_nullscan.q
index d048231..3c71242 100644
--- a/ql/src/test/queries/clientpositive/acid_nullscan.q
+++ b/ql/src/test/queries/clientpositive/acid_nullscan.q
@@ -10,6 +10,7 @@ CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STO
 insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
 insert into table acid_vectorized values (1, 'bar');
 
+
 explain extended
 select sum(a) from acid_vectorized where false;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/acid_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out
index 669fa3f..d15e2f1 100644
--- a/ql/src/test/results/clientpositive/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out
@@ -42,12 +42,12 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid_vectorized
-            Statistics: Num rows: 1 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 88 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 289 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(a)
                 mode: hash
@@ -78,8 +78,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.acid_vectorized
               numFiles 3
-              numRows 0
-              rawDataSize 0
               serialization.ddl struct acid_vectorized { i32 a, string b}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
@@ -101,8 +99,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.acid_vectorized
                 numFiles 3
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct acid_vectorized { i32 a, string b}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index 1f4c0ad..63caa17 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -195,8 +195,6 @@ Retention:          	0
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	numFiles            	2                   
-	numRows             	0                   
-	rawDataSize         	0                   
 	totalSize           	1862                
 	transactional       	true                
 	transactional_properties	default             
@@ -239,8 +237,6 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	4                   
-	numRows             	0                   
-	rawDataSize         	0                   
 	totalSize           	3012                
 	transactional       	true                
 	transactional_properties	default             

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index b4c8be0..18ca78f 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -330,33 +330,33 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable
-                  Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 42 Data size: 492 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (a = 3) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 58 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: double)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 58 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 58 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable
-                  Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 42 Data size: 656 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((a = 3) and (d = 3)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: double)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double)
         Reducer 2 
             Reduce Operator Tree:
@@ -367,14 +367,14 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -451,34 +451,34 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable
-                  Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 42 Data size: 16072 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (a = 3) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 1913 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: CAST( t AS timestamp with local time zone) (type: timestamp with local time zone), 3 (type: int), b (type: varchar(256)), c (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1913 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp)
                         outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity
-                        Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1913 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: __time_granularity (type: timestamp)
                           sort order: +
                           Map-reduce partition columns: __time_granularity (type: timestamp)
-                          Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 5 Data size: 1913 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: timestamp with local time zone), _col1 (type: int), _col2 (type: varchar(256)), _col3 (type: double)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: int), VALUE._col2 (type: varchar(256)), VALUE._col3 (type: double), KEY.__time_granularity (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, __time_granularity
-                Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1913 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   Dp Sort State: PARTITION_SORTED
-                  Statistics: Num rows: 6 Data size: 2352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1913 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
                       output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
@@ -537,33 +537,33 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable
-                  Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 42 Data size: 492 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (a = 3) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 58 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: double)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 58 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 58 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable
-                  Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 42 Data size: 656 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((a = 3) and (d = 3)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: double)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double)
         Reducer 2 
             Reduce Operator Tree:
@@ -574,14 +574,14 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat


http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index 1abd3a2..6a5ace0 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -45,22 +45,22 @@ STAGE PLANS:
                   alias: acidtbldefault
                   filterExpr: (a = 1) (type: boolean)
                   buckets included: [1,] of 16
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1765 Data size: 6712 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: (a = 1) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: 1 (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
                         GlobalTableId: 0
                         directory: hdfs://### HDFS PATH ###
                         NumFilesPerFileSink: 1
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
                         Stats Publishing Key Prefix: hdfs://### HDFS PATH ###
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -98,8 +98,6 @@ STAGE PLANS:
                     location hdfs://### HDFS PATH ###
                     name default.acidtbldefault
                     numFiles 17
-                    numRows 0
-                    rawDataSize 0
                     serialization.ddl struct acidtbldefault { i32 a}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -122,8 +120,6 @@ STAGE PLANS:
                       location hdfs://### HDFS PATH ###
                       name default.acidtbldefault
                       numFiles 17
-                      numRows 0
-                      rawDataSize 0
                       serialization.ddl struct acidtbldefault { i32 a}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
index 64e5b17..05d5d0a 100644
--- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
@@ -665,22 +665,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over10k_orc_bucketed
-                  Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1227 Data size: 702030 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                     outputColumnNames: ROW__ID
-                    Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1227 Data size: 702030 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
                       keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 613 Data size: 51492 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
-                        Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 613 Data size: 51492 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -692,13 +692,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 613 Data size: 51492 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: (_col1 > 1L) (type: boolean)
-                  Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/default_constraint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/default_constraint.q.out b/ql/src/test/results/clientpositive/llap/default_constraint.q.out
index 89b1224..72ff7af 100644
--- a/ql/src/test/results/clientpositive/llap/default_constraint.q.out
+++ b/ql/src/test/results/clientpositive/llap/default_constraint.q.out
@@ -1490,8 +1490,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	numFiles            	1                   
-	totalSize           	1063                
+	COLUMN_STATS_ACCURATE	{}                  
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -1658,9 +1657,8 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 #### A masked pattern was here ####
-	numFiles            	2                   
-	totalSize           	2127                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -1737,9 +1735,8 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 #### A masked pattern was here ####
-	numFiles            	2                   
-	totalSize           	2127                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -1913,9 +1910,8 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 #### A masked pattern was here ####
-	numFiles            	3                   
-	totalSize           	3192                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -1991,9 +1987,8 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 #### A masked pattern was here ####
-	numFiles            	3                   
-	totalSize           	3192                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -2070,9 +2065,8 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 #### A masked pattern was here ####
-	numFiles            	3                   
-	totalSize           	3192                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -2644,8 +2638,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	numFiles            	1                   
-	totalSize           	1063                
+	COLUMN_STATS_ACCURATE	{}                  
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
index 6a97736..97f7cd1 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
@@ -94,19 +94,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_part
-                  Statistics: Num rows: 1600 Data size: 150327 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 158 Data size: 60414 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 1911 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1911 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1911 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -115,10 +115,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1911 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1911 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -189,7 +189,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_part
-                  Statistics: Num rows: 1600 Data size: 444727 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 157 Data size: 102975 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL
@@ -380,19 +380,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_part_sdpo
-                  Statistics: Num rows: 1600 Data size: 150327 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 174 Data size: 66399 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 1908 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1908 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1908 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -401,10 +401,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1908 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1908 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -475,7 +475,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_part_sdpo
-                  Statistics: Num rows: 1600 Data size: 444727 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 169 Data size: 110820 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL
@@ -675,19 +675,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part
-                  Statistics: Num rows: 1600 Data size: 150327 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 154 Data size: 59496 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -696,10 +696,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -771,19 +771,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part
-                  Statistics: Num rows: 3200 Data size: 313367 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 455 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
                       outputColumnNames: _col0, _col4
-                      Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                         value expressions: _col4 (type: int)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -793,10 +793,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col2 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -897,7 +897,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part
-                  Statistics: Num rows: 3200 Data size: 902167 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL
                   Filter Operator
                     predicate: (value = 'bar') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
@@ -1095,19 +1095,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part_sdpo
-                  Statistics: Num rows: 1600 Data size: 150327 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 154 Data size: 59496 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -1116,10 +1116,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1931 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1191,19 +1191,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part_sdpo
-                  Statistics: Num rows: 3200 Data size: 313367 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 455 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
                       outputColumnNames: _col0, _col4
-                      Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: '2008-04-08' (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: ++++
                         Map-reduce partition columns: '2008-04-08' (type: string), _col4 (type: int)
-                        Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -1212,11 +1212,11 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
-                Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                 File Output Operator
                   compressed: false
                   Dp Sort State: PARTITION_BUCKET_SORTED
-                  Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1317,7 +1317,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part_sdpo
-                  Statistics: Num rows: 3200 Data size: 902167 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL
                   Filter Operator
                     predicate: (value = 'bar') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
@@ -1515,7 +1515,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part_sdpo_no_cp
-                  Statistics: Num rows: 1600 Data size: 598664 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 95 Data size: 81408 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
@@ -1613,19 +1613,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_2l_part_sdpo_no_cp
-                  Statistics: Num rows: 3200 Data size: 1197144 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1600 Data size: 598664 Basic stats: PARTIAL Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), key (type: string), ds (type: string), hr (type: int)
                       outputColumnNames: _col0, _col1, _col3, _col4
-                      Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: ++++
                         Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
-                        Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
                         value expressions: _col1 (type: string), 'bar' (type: string)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1635,11 +1635,11 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
-                Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
                 File Output Operator
                   compressed: false
                   Dp Sort State: PARTITION_BUCKET_SORTED
-                  Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index 352f6ba..eeb6a7a 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -3233,19 +3233,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_uami
-                  Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 239 Data size: 75112 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 1571 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
                       outputColumnNames: _col0, _col1, _col3
-                      Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1571 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1571 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: int), _col3 (type: varchar(128))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -3255,10 +3255,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1571 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1571 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -3326,19 +3326,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: acid_uami
-                  Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 277 Data size: 86920 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((de = 3.14) and enforce_constraint(i is not null) and enforce_constraint(vc is not null)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 313 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
                       outputColumnNames: _col0, _col1, _col3
-                      Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 313 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 313 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: int), _col3 (type: varchar(128))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -3348,10 +3348,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 313 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 313 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
index d8863a2..1ef7b87 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
@@ -704,19 +704,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable_2
-                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -730,7 +730,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash
@@ -982,19 +982,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable_2
-                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1008,7 +1008,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
index d8863a2..1ef7b87 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
@@ -704,19 +704,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable_2
-                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -730,7 +730,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash
@@ -982,19 +982,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable_2
-                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1008,7 +1008,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index 23e733b..7c8a38f 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -1623,10 +1623,10 @@ POSTHOOK: Output: default@multi1_mm@p=1
 POSTHOOK: Output: default@multi1_mm@p=455
 POSTHOOK: Output: default@multi1_mm@p=456
 POSTHOOK: Output: default@multi1_mm@p=457
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
@@ -1700,10 +1700,10 @@ POSTHOOK: Input: default@intermediate@p=455
 POSTHOOK: Input: default@intermediate@p=456
 POSTHOOK: Input: default@intermediate@p=457
 POSTHOOK: Output: default@multi1_mm@p=1
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@multi1_mm
@@ -1815,7 +1815,6 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	numFiles            	3                   
 	numRows             	6                   
 	rawDataSize         	13                  
@@ -1865,7 +1864,6 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	numFiles            	6                   
 	numRows             	12                  
 	rawDataSize         	26                  
@@ -1923,7 +1921,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	55                  
 	numRows             	500                 
 	rawDataSize         	5312                

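The mm_all.q.out hunks above show the other shape of the change: the row-level parameters (numFiles, numRows, rawDataSize) survive, but COLUMN_STATS_ACCURATE drops from {"BASIC_STATS":"true",...} to {} or is removed outright, so the surviving numbers can no longer be certified. A hedged sketch of how such a value can be interpreted (the real parsing lives in StatsSetupConst; the Jackson usage and method name here are assumptions for illustration):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Illustrative reader for the COLUMN_STATS_ACCURATE JSON values shown above.
public final class ColumnStatsAccurateReader {
  private static final ObjectMapper MAPPER = new ObjectMapper();

  /** True only if the parameter records {"BASIC_STATS":"true", ...}. */
  public static boolean basicStatsAccurate(String value) {
    if (value == null || value.isEmpty()) {
      return false;
    }
    try {
      JsonNode root = MAPPER.readTree(value);
      return "true".equals(root.path("BASIC_STATS").asText(null));
    } catch (Exception e) {
      return false; // an unparseable value is treated as inaccurate stats
    }
  }
}

Against the outputs above, {"BASIC_STATS":"true"} yields true and {} yields false, which is exactly the downgrade these hunks record.
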
http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
index 29e408c..6561456 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite_3.q.out
@@ -710,19 +710,19 @@ STAGE PLANS:
                   Statistics: Num rows: 5 Data size: 1205 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: cmv_basetable_2
-            Statistics: Num rows: 3 Data size: 727 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 42 Data size: 23560 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((c > 10.1) and a is not null) (type: boolean)
-              Statistics: Num rows: 1 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 14 Data size: 7853 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: a (type: int), c (type: decimal(10,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 14 Data size: 7853 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 1 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 14 Data size: 7853 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(10,2))
       Reduce Operator Tree:
         Join Operator
@@ -732,12 +732,12 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
           outputColumnNames: _col0, _col2
-          Statistics: Num rows: 5 Data size: 1325 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 15 Data size: 8638 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             keys: _col0 (type: int), _col2 (type: decimal(10,2))
             mode: hash
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 5 Data size: 1325 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 15 Data size: 8638 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -753,20 +753,20 @@ STAGE PLANS:
               key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
               sort order: ++
               Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-              Statistics: Num rows: 5 Data size: 1325 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 15 Data size: 8638 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 4031 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 4031 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 4031 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1013,19 +1013,19 @@ STAGE PLANS:
                   Statistics: Num rows: 5 Data size: 1205 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: cmv_basetable_2
-            Statistics: Num rows: 3 Data size: 727 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 42 Data size: 23560 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((c > 10.1) and a is not null) (type: boolean)
-              Statistics: Num rows: 1 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 14 Data size: 7853 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: a (type: int), c (type: decimal(10,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 14 Data size: 7853 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 1 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 14 Data size: 7853 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(10,2))
       Reduce Operator Tree:
         Join Operator
@@ -1035,12 +1035,12 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
           outputColumnNames: _col0, _col2
-          Statistics: Num rows: 5 Data size: 1325 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 15 Data size: 8638 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             keys: _col0 (type: int), _col2 (type: decimal(10,2))
             mode: hash
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 5 Data size: 1325 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 15 Data size: 8638 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -1056,20 +1056,20 @@ STAGE PLANS:
               key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
               sort order: ++
               Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-              Statistics: Num rows: 5 Data size: 1325 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 15 Data size: 8638 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 4031 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 7 Data size: 4031 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 7 Data size: 4031 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_all.q.out b/ql/src/test/results/clientpositive/mm_all.q.out
index ac6c080..a0cbdb5 100644
--- a/ql/src/test/results/clientpositive/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/mm_all.q.out
@@ -1637,10 +1637,10 @@ POSTHOOK: Output: default@multi1_mm@p=1
 POSTHOOK: Output: default@multi1_mm@p=455
 POSTHOOK: Output: default@multi1_mm@p=456
 POSTHOOK: Output: default@multi1_mm@p=457
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
@@ -1829,7 +1829,6 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	numFiles            	1                   
 	numRows             	6                   
 	rawDataSize         	13                  
@@ -1879,7 +1878,6 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	numFiles            	2                   
 	numRows             	12                  
 	rawDataSize         	26                  
@@ -1937,7 +1935,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/mm_default.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_default.q.out b/ql/src/test/results/clientpositive/mm_default.q.out
index 1345efd..f5341ed 100644
--- a/ql/src/test/results/clientpositive/mm_default.q.out
+++ b/ql/src/test/results/clientpositive/mm_default.q.out
@@ -180,7 +180,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	1                   
 	numRows             	1                   
 	rawDataSize         	3                   

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
index 92a04dd..114f9a8 100644
--- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
+++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
@@ -680,22 +680,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over10k_orc_bucketed
-                  Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1227 Data size: 702030 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                     outputColumnNames: ROW__ID
-                    Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1227 Data size: 702030 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
                       keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 613 Data size: 51492 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
-                        Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 613 Data size: 51492 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -704,13 +704,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 613 Data size: 51492 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: (_col1 > 1L) (type: boolean)
-                  Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
index 7f18f2b..8d67fe4 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
@@ -295,16 +295,16 @@ Stage-3
               Reducer 2
               File Output Operator [FS_8]
                 table:{"name:":"default.acid_uami"}
-                Select Operator [SEL_4] (rows=1/2 width=328)
+                Select Operator [SEL_4] (rows=10/2 width=321)
                   Output:["_col0","_col1","_col2","_col3"]
                 <-Map 1 [SIMPLE_EDGE]
                   SHUFFLE [RS_3]
                     PartitionCols:UDFToInteger(_col0)
-                    Select Operator [SEL_2] (rows=1/2 width=328)
+                    Select Operator [SEL_2] (rows=10/2 width=321)
                       Output:["_col0","_col1","_col3"]
-                      Filter Operator [FIL_9] (rows=1/2 width=328)
+                      Filter Operator [FIL_9] (rows=10/2 width=321)
                         predicate:((de = 109.23) or (de = 119.23))
-                        TableScan [TS_0] (rows=1/4 width=328)
+                        TableScan [TS_0] (rows=49/4 width=321)
                           default@acid_uami,acid_uami, ACID table,Tbl:COMPLETE,Col:NONE,Output:["i","de","vc"]
 
 PREHOOK: query: select * from acid_uami order by de

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 5919089..78ea01d 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -270,6 +270,7 @@ public class StatsSetupConst {
     if (params == null) {
       return;
     }
+
     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
     stats.columnStats.clear();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 89354a2..0dd3eb1 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -297,7 +297,7 @@ public class HiveAlterHandler implements AlterHandler {
             !isPartitionedTable) {
           Database db = msdb.getDatabase(newDbName);
           // Update table stats. For partitioned table, we update stats in alterPartition()
-          MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext);
+          MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext, false);
         }
 
         if (isPartitionedTable) {
@@ -436,23 +436,25 @@ public class HiveAlterHandler implements AlterHandler {
           .currentTimeMillis() / 1000));
     }
 
-    Table tbl = msdb.getTable(dbname, name);
-    if (tbl == null) {
-      throw new InvalidObjectException(
-          "Unable to alter partition because table or database does not exist.");
-    }
 
     //alter partition
     if (part_vals == null || part_vals.size() == 0) {
       try {
         msdb.openTransaction();
+
+        Table tbl = msdb.getTable(dbname, name);
+        if (tbl == null) {
+          throw new InvalidObjectException(
+              "Unable to alter partition because table or database does not exist.");
+        }
         oldPart = msdb.getPartition(dbname, name, new_part.getValues());
         if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
           // if stats are same, no need to update
           if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
             MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
           } else {
-            MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
+            MetaStoreUtils.updatePartitionStatsFast(
+                new_part, tbl, wh, false, true, environmentContext, false);
           }
         }
 
@@ -494,6 +496,11 @@ public class HiveAlterHandler implements AlterHandler {
     boolean dataWasMoved = false;
     try {
       msdb.openTransaction();
+      Table tbl = msdb.getTable(dbname, name);
+      if (tbl == null) {
+        throw new InvalidObjectException(
+            "Unable to alter partition because table or database does not exist.");
+      }
       try {
         oldPart = msdb.getPartition(dbname, name, part_vals);
       } catch (NoSuchObjectException e) {
@@ -581,7 +588,8 @@ public class HiveAlterHandler implements AlterHandler {
       }
 
       if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
-        MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
+        MetaStoreUtils.updatePartitionStatsFast(
+            new_part, tbl, wh, false, true, environmentContext, false);
       }
 
       String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
@@ -650,15 +658,16 @@ public class HiveAlterHandler implements AlterHandler {
       transactionalListeners = handler.getTransactionalListeners();
     }
 
-    Table tbl = msdb.getTable(dbname, name);
-    if (tbl == null) {
-      throw new InvalidObjectException(
-          "Unable to alter partitions because table or database does not exist.");
-    }
 
     boolean success = false;
     try {
       msdb.openTransaction();
+
+      Table tbl = msdb.getTable(dbname, name);
+      if (tbl == null) {
+        throw new InvalidObjectException(
+            "Unable to alter partitions because table or database does not exist.");
+      }
       for (Partition tmpPart: new_parts) {
         // Set DDL time to now if not specified
         if (tmpPart.getParameters() == null ||
@@ -677,7 +686,8 @@ public class HiveAlterHandler implements AlterHandler {
           if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
             MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
           } else {
-            MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true, environmentContext);
+            MetaStoreUtils.updatePartitionStatsFast(
+                tmpPart, tbl, wh, false, true, environmentContext, false);
           }
         }
 

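The HiveAlterHandler changes above move the table lookup inside msdb.openTransaction(),
so the existence check and every subsequent read and write in the block observe one
consistent metastore snapshot instead of racing with a concurrent drop or rename. A
minimal sketch of that pattern follows; Store and TableDesc are illustrative stand-ins,
not the real RawStore/Table API.

    interface Store {
      void openTransaction();
      boolean commitTransaction();
      void rollbackTransaction();
      TableDesc getTable(String db, String table);
    }

    class TableDesc {}

    class AlterPartitionSketch {
      static void alterWithConsistentRead(Store store, String db, String table) {
        boolean committed = false;
        try {
          store.openTransaction();
          // Look the table up inside the transaction: a lookup done before
          // openTransaction() could see a table that is dropped or renamed
          // before the updates below run.
          TableDesc tbl = store.getTable(db, table);
          if (tbl == null) {
            throw new IllegalStateException(
                "Unable to alter partition because table or database does not exist.");
          }
          // ... update partition metadata and stats using tbl ...
          committed = store.commitTransaction();
        } finally {
          if (!committed) {
            store.rollbackTransaction();
          }
        }
      }
    }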
http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 662de9a..c99c3af 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1488,7 +1488,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
         if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
             !MetaStoreUtils.isView(tbl)) {
-          MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext);
+          MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, false, envContext, true);
         }
 
         // set create time
@@ -2673,7 +2673,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
             !MetaStoreUtils.isView(tbl)) {
-          MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, envContext);
+          MetaStoreUtils.updatePartitionStatsFast(part, tbl, wh, madeDir, false, envContext, true);
         }
 
         if (ms.addPartition(part)) {
@@ -3241,7 +3241,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final Table tbl, final PartitionSpecProxy.PartitionIterator part, boolean madeDir) throws MetaException {
       if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
           !MetaStoreUtils.isView(tbl)) {
-        MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false, null);
+        MetaStoreUtils.updatePartitionStatsFast(part, tbl, wh, madeDir, false, null, true);
       }
 
       // set create time

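The HiveMetaStore call sites above thread the new isCreate flag into the fast-stats
helpers: create_table and add_partition pass true, while the alter paths in
HiveAlterHandler pass false. Per the TODO in MetaStoreUtils, the distinction matters
for ACID tables, where a raw directory listing only matches the table contents at
create time; afterwards the directory mixes deltas from many write IDs. A hedged
sketch of that gate (the class and method names are assumptions, not actual Hive code):

    class FastStatsGateSketch {
      // At create time the table directory was just made (or populated by this
      // very operation), so a recursive listing reflects exactly the table's
      // data. After that, an ACID table directory accumulates base/delta dirs
      // from many write IDs, and a raw listing would overcount live data.
      static boolean directoryListingIsTrustworthy(boolean isAcidTable, boolean isCreate) {
        return !isAcidTable || isCreate;
      }
    }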
http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 20c1060..445a7b8 100755
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -543,7 +543,7 @@ public class Warehouse {
-   * @return array of FileStatus objects corresponding to the files
+   * @return list of FileStatus objects corresponding to the files
    * making up the passed storage description
    */
-  public FileStatus[] getFileStatusesForSD(StorageDescriptor desc)
+  public List<FileStatus> getFileStatusesForSD(StorageDescriptor desc)
       throws MetaException {
     return getFileStatusesForLocation(desc.getLocation());
   }
@@ -553,7 +553,7 @@ public class Warehouse {
-   * @return array of FileStatus objects corresponding to the files
-   * making up the passed storage description
+   * @return list of FileStatus objects corresponding to the files
+   * under the passed location
    */
-  public FileStatus[] getFileStatusesForLocation(String location)
+  public List<FileStatus> getFileStatusesForLocation(String location)
       throws MetaException {
     try {
       Path path = new Path(location);
@@ -571,7 +571,7 @@ public class Warehouse {
-   * @return array of FileStatus objects corresponding to the files making up the passed
+   * @return list of FileStatus objects corresponding to the files making up the passed
    * unpartitioned table
    */
-  public FileStatus[] getFileStatusesForUnpartitionedTable(Database db, Table table)
+  public List<FileStatus> getFileStatusesForUnpartitionedTable(Database db, Table table)
       throws MetaException {
     Path tablePath = getDnsPath(new Path(table.getSd().getLocation()));
     try {

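Warehouse now returns List&lt;FileStatus&gt; instead of FileStatus[], so callers swap
.length for .size() and new FileStatus[0] for an empty list; enhanced-for iteration
is unchanged. A self-contained sketch of the caller-side adaptation, with
FileStatusLike standing in for org.apache.hadoop.fs.FileStatus:

    import java.util.List;

    class FileStatusLike {
      long getLen() { return 0L; }
    }

    class CallerSketch {
      static long totalSize(List<FileStatusLike> statuses) {
        // The loop body is identical to the array version; only explicit
        // length checks change, e.g. statuses.length -> statuses.size().
        long total = 0;
        for (FileStatusLike fs : statuses) {
          total += fs.getLen();
        }
        return total;
      }
    }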
http://git-wip-us.apache.org/repos/asf/hive/blob/9b36ffa9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
index b44ff8c..4138fa5 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.metastore.utils;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
@@ -314,11 +315,11 @@ public class FileUtils {
-   * @return array of FileStatus
+   * @return list of FileStatus
    * @throws IOException
    */
-  public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs)
+  public static List<FileStatus> getFileStatusRecurse(Path path, int level, FileSystem fs)
       throws IOException {
 
-    // if level is <0, the return all files/directories under the specified path
+    // if level < 0, then return all files/directories under the specified path
-    if ( level < 0) {
+    if (level < 0) {
       List<FileStatus> result = new ArrayList<>();
       try {
         FileStatus fileStatus = fs.getFileStatus(path);
@@ -328,9 +329,9 @@ public class FileUtils {
-        // does not exist. But getFileStatus() throw IOException. To mimic the
-        // similar behavior we will return empty array on exception. For external
-        // tables, the path of the table will not exists during table creation
+        // does not exist, but getFileStatus() throws IOException. To mimic that
+        // behavior we return an empty list on exception. For external tables,
+        // the path of the table may not exist during table creation.
-        return new FileStatus[0];
+        return new ArrayList<>(0);
       }
-      return result.toArray(new FileStatus[result.size()]);
+      return result;
     }
 
     // construct a path pattern (e.g., /*/*) to find all dynamically generated paths
@@ -339,7 +340,7 @@ public class FileUtils {
       sb.append(Path.SEPARATOR).append("*");
     }
     Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
+    return Lists.newArrayList(fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER));
   }
 
   /**
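For level >= 0, getFileStatusRecurse builds a glob with one wildcard per directory
level and hands it to globStatus, so a table with two partition columns (level 2) is
matched by &lt;tableDir&gt;/*/*. A small illustration of the pattern construction,
mirroring the loop above; partitionGlob is not a real Hive method:

    class GlobSketch {
      static String partitionGlob(String tableDir, int level) {
        StringBuilder sb = new StringBuilder(tableDir);
        for (int i = 0; i < level; i++) {
          sb.append('/').append('*');  // one wildcard per partition column
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // Prints /warehouse/t/*/*, which globStatus expands to the leaf
        // partition directories, e.g. /warehouse/t/ds=2018-03-08/hr=00
        System.out.println(partitionGlob("/warehouse/t", 2));
      }
    }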