Posted to commits@hive.apache.org by se...@apache.org on 2017/10/13 00:16:21 UTC

[42/50] [abbrv] hive git commit: HIVE-17674 : grep TODO HIVE-15212.17.patch |wc -l = 49 (Sergey Shelukhin)

HIVE-17674 : grep TODO HIVE-15212.17.patch |wc -l = 49 (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/64015972
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/64015972
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/64015972

Branch: refs/heads/master
Commit: 640159726f4406e752e9555b0b9a54452d8e9157
Parents: 77f3f28
Author: sergey <se...@apache.org>
Authored: Tue Oct 3 18:59:41 2017 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Oct 3 18:59:41 2017 -0700

----------------------------------------------------------------------
 .../hcatalog/mapreduce/HCatOutputFormat.java    |  3 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |  4 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   | 58 ++-----------------
 .../TransactionalValidationListener.java        |  4 +-
 .../apache/hadoop/hive/ql/exec/CopyTask.java    |  2 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 24 ++++----
 .../hadoop/hive/ql/exec/FetchOperator.java      |  4 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  2 +-
 .../hive/ql/exec/HashTableSinkOperator.java     |  3 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |  3 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  3 -
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 59 ++++++++++++++++++--
 .../hive/ql/io/CombineHiveInputFormat.java      |  2 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      | 11 ++--
 .../apache/hadoop/hive/ql/metadata/Hive.java    | 20 +++----
 .../formatting/JsonMetaDataFormatter.java       |  2 +-
 .../formatting/TextMetaDataFormatter.java       |  3 +-
 .../ql/optimizer/AbstractBucketJoinProc.java    |  3 +-
 .../BucketingSortingReduceSinkOptimizer.java    |  4 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       | 11 +---
 .../hadoop/hive/ql/optimizer/SamplePruner.java  |  5 +-
 .../optimizer/physical/SamplingOptimizer.java   |  3 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |  2 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  4 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   | 25 +++++----
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |  4 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 18 +++---
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |  4 +-
 .../hadoop/hive/ql/plan/LoadTableDesc.java      |  4 +-
 .../apache/hadoop/hive/ql/plan/MoveWork.java    |  8 ---
 .../hive/ql/txn/compactor/CompactorMR.java      |  2 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |  2 +-
 32 files changed, 147 insertions(+), 159 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
index bbd594a..2654212 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -114,7 +115,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
         throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
       }
 
-      if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
+      if (AcidUtils.isInsertOnlyTable(table.getParameters())) {
         throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into an insert-only ACID table from Pig/Mapreduce is not supported");
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 375b34a..497b5af 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2378,7 +2378,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Exception ex = null;
       try {
         t = get_table_core(dbname, name);
-        if (MetaStoreUtils.isInsertOnlyTable(t.getParameters())) {
+        if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
           assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
               "insert-only tables", "get_table_req");
         }
@@ -2510,7 +2510,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           startIndex = endIndex;
         }
         for (Table t : tables) {
-          if (MetaStoreUtils.isInsertOnlyTable(t.getParameters())) {
+          if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
             assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
                 "insert-only tables", "get_table_req");
           }
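
For context: a client that wants the metastore to return insert-only tables from get_table_req must declare the capability in the request. A minimal sketch, assuming the thrift-generated request/capability classes this check reads (treat the exact names as illustrative):

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.ClientCapabilities;
    import org.apache.hadoop.hive.metastore.api.ClientCapability;
    import org.apache.hadoop.hive.metastore.api.GetTableRequest;

    public class CapabilityExample {
      public static GetTableRequest buildRequest() {
        GetTableRequest req = new GetTableRequest("default", "mm_table");
        // Without this, get_table_req on an insert-only table fails the
        // assertClientHasCapability check above.
        req.setCapabilities(new ClientCapabilities(
            Arrays.asList(ClientCapability.INSERT_ONLY_TABLES)));
        return req;
      }
    }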

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 235d042..96c8871 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1821,58 +1821,6 @@ public class MetaStoreUtils {
     return cols;
   }
 
-  // TODO The following two utility methods can be moved to AcidUtils once no class in metastore is relying on them,
-  // right now ObjectStore.getAllMmTablesForCleanup is calling these method
-  /**
-   * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE
-   * @param params table properties
-   * @return true if table is an INSERT_ONLY table, false otherwise
-   */
-  public static boolean isInsertOnlyTable(Map<String, String> params) {
-    return isInsertOnlyTable(params, false);
-  }
-
-  // TODO: CTAS for MM may currently be broken. It used to work. See the old code and why isCtas isn't used?
-  public static boolean isInsertOnlyTable(Map<String, String> params, boolean isCtas) {
-    String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
-    return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
-  }
-
-   public static boolean isInsertOnlyTable(Properties params) {
-     String transactionalProp = params.getProperty(
-         hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
-     return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
-  }
-
-   /** The method for altering table props; may set the table to MM, non-MM, or not affect MM. */
-  public static Boolean isToInsertOnlyTable(Map<String, String> props) {
-    // TODO: Setting these separately is a very hairy issue in certain combinations, since we
-    //       cannot decide what type of table this becomes without taking both into account, and
-    //       in many cases the conversion might be illegal.
-    //       The only thing we allow is tx = true w/o tx-props, for backward compat.
-    String transactional = props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
-    String transactionalProp = props.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
-    if (transactional == null && transactionalProp == null) return null; // Not affected.
-    boolean isSetToTxn = "true".equalsIgnoreCase(transactional);
-    if (transactionalProp == null) {
-      if (isSetToTxn) return false; // Assume the full ACID table.
-      throw new RuntimeException("Cannot change '" + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL
-          + "' without '" + hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES + "'");
-    }
-    if (!"insert_only".equalsIgnoreCase(transactionalProp)) return false; // Not MM.
-    if (!isSetToTxn) {
-      throw new RuntimeException("Cannot set '"
-          + hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES + "' to 'insert_only' without "
-          + "setting '" + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL + "' to 'true'");
-    }
-    return true;
-  }
-
-  public static boolean isRemovedInsertOnlyTable(Set<String> removedSet) {
-    boolean hasTxn = removedSet.contains(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL),
-        hasProps = removedSet.contains(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
-    return hasTxn || hasProps;
-  }
 
   // given a list of partStats, this function will give you an aggr stats
   public static List<ColumnStatisticsObj> aggrPartitionStats(List<ColumnStatistics> partStats,
@@ -2056,4 +2004,10 @@ public class MetaStoreUtils {
     ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress;
     return machineList.includes(ipAddress);
   }
+
+  /** Duplicates AcidUtils; used in a couple places in metastore. */
+  public static boolean isInsertOnlyTableParam(Map<String, String> params) {
+    String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
+  }
 }
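
A minimal usage sketch of the retained helper; the map below is hand-built for illustration, using the literal key names that the hive_metastoreConstants constants resolve to:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;

    public class InsertOnlyParamExample {
      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("transactional", "true");
        params.put("transactional_properties", "insert_only");
        System.out.println(MetaStoreUtils.isInsertOnlyTableParam(params)); // true
        params.put("transactional_properties", "default");
        System.out.println(MetaStoreUtils.isInsertOnlyTableParam(params)); // false
      }
    }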

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index dabede4..29d8da8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -139,7 +139,7 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
       hasValidTransactionalValue = true;
     }
 
-    if (!hasValidTransactionalValue && !MetaStoreUtils.isInsertOnlyTable(oldTable.getParameters())) {
+    if (!hasValidTransactionalValue && !MetaStoreUtils.isInsertOnlyTableParam(oldTable.getParameters())) {
       // if here, there is attempt to set transactional to something other than 'true'
       // and NOT the same value it was before
       throw new MetaException("TBLPROPERTIES with 'transactional'='true' cannot be unset");
@@ -158,7 +158,7 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
         // null and an attempt is made to set it. This behaviour can be changed in the future.
         if ((oldTransactionalPropertiesValue == null
             || !oldTransactionalPropertiesValue.equalsIgnoreCase(transactionalPropertiesValue))
-            && !MetaStoreUtils.isInsertOnlyTable(oldTable.getParameters())) {
+            && !MetaStoreUtils.isInsertOnlyTableParam(oldTable.getParameters())) {
           throw new MetaException("TBLPROPERTIES with 'transactional_properties' cannot be "
               + "altered after the table is created");
         }
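
Condensed, the two invariants this listener enforces are: 'transactional'='true' can never be unset, and 'transactional_properties' cannot change after creation, with tables that were already insert-only exempted by the new isInsertOnlyTableParam check. A hypothetical restatement of just those checks (not the listener's actual control flow, which also validates the other legal values):

    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.MetaException;

    final class TxnAlterInvariants {
      static void check(Map<String, String> oldP, Map<String, String> newP) throws MetaException {
        boolean oldInsertOnly =
            "insert_only".equalsIgnoreCase(oldP.get("transactional_properties"));
        String newTxn = newP.get("transactional");
        if ("true".equalsIgnoreCase(oldP.get("transactional")) && newTxn != null
            && !"true".equalsIgnoreCase(newTxn) && !oldInsertOnly) {
          throw new MetaException("TBLPROPERTIES with 'transactional'='true' cannot be unset");
        }
        String newProps = newP.get("transactional_properties");
        if (newProps != null && !newProps.equalsIgnoreCase(oldP.get("transactional_properties"))
            && !oldInsertOnly) {
          throw new MetaException("TBLPROPERTIES with 'transactional_properties' cannot be "
              + "altered after the table is created");
        }
      }
    }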

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
index 7299ed1..664a11d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
@@ -108,7 +108,7 @@ public class CopyTask extends Task<CopyWork> implements Serializable {
       FileSystem fs, Path path, boolean isSourceMm) throws IOException {
     if (!fs.exists(path)) return null;
     if (!isSourceMm) return matchFilesOneDir(fs, path, null);
-    // TODO: this doesn't handle list bucketing properly. Does the original exim do that?
+    // Note: this doesn't handle list bucketing properly; neither does the original code.
     FileStatus[] mmDirs = fs.listStatus(path, new JavaUtils.AnyIdDirFilter());
     if (mmDirs == null || mmDirs.length == 0) return null;
     List<FileStatus> allFiles = new ArrayList<FileStatus>();

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 4cf0f89..205ca77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1911,7 +1911,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
 
     Table tbl = db.getTable(desc.getTableName());
-    if (!AcidUtils.isFullAcidTable(tbl) && !MetaStoreUtils.isInsertOnlyTable(tbl.getParameters())) {
+    if (!AcidUtils.isFullAcidTable(tbl) && !AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
       throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(),
           tbl.getTableName());
     }
@@ -4098,8 +4098,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     List<Task<?>> result = null;
     if (part == null) {
       Set<String> removedSet = alterTbl.getProps().keySet();
-      boolean isFromMmTable = MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()),
-          isRemoved = MetaStoreUtils.isRemovedInsertOnlyTable(removedSet);
+      boolean isFromMmTable = AcidUtils.isInsertOnlyTable(tbl.getParameters()),
+          isRemoved = AcidUtils.isRemovedInsertOnlyTable(removedSet);
       if (isFromMmTable && isRemoved) {
         result = generateRemoveMmTasks(tbl);
       }
@@ -4124,11 +4124,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     // operation commits. Deleting uncommitted things is safe, but moving stuff before we convert
     // could cause data loss.
     List<Path> allMmDirs = new ArrayList<>();
-    if (tbl.isStoredAsSubDirectories()) {
-      // TODO: support this? we only bail because it's a PITA and hardly anyone seems to care.
-      throw new HiveException("Converting list bucketed tables stored as subdirectories "
-          + " to and from MM is not supported");
-    }
+    checkMmLb(tbl);
     List<String> bucketCols = tbl.getBucketCols();
     if (bucketCols != null && !bucketCols.isEmpty()
         && HiveConf.getBoolVar(conf, ConfVars.HIVE_STRICT_CHECKS_BUCKETING)) {
@@ -4175,16 +4171,16 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
   private void checkMmLb(Table tbl) throws HiveException {
     if (!tbl.isStoredAsSubDirectories()) return;
-    // TODO: support this?
+    // TODO [MM gap?]: by design; no one seems to use LB tables. They will work, but will not convert.
+    //                 It's possible to work around this by re-creating and re-inserting the table.
     throw new HiveException("Converting list bucketed tables stored as subdirectories "
-        + " to and from MM is not supported");
+        + " to and from MM is not supported. Please re-create the table in the desired format.");
   }
 
   private void checkMmLb(Partition part) throws HiveException {
     if (!part.isStoredAsSubDirectories()) return;
-    // TODO: support this?
     throw new HiveException("Converting list bucketed tables stored as subdirectories "
-        + " to and from MM is not supported. Please create a table in the desired format.");
+        + " to and from MM is not supported. Please re-create the table in the desired format.");
   }
 
   private void handleRemoveMm(
@@ -4289,8 +4285,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     if (part != null) {
       part.getTPartition().getParameters().putAll(alterTbl.getProps());
     } else {
-      boolean isFromMmTable = MetaStoreUtils.isInsertOnlyTable(tbl.getParameters());
-      Boolean isToMmTable = MetaStoreUtils.isToInsertOnlyTable(alterTbl.getProps());
+      boolean isFromMmTable = AcidUtils.isInsertOnlyTable(tbl.getParameters());
+      Boolean isToMmTable = AcidUtils.isToInsertOnlyTable(alterTbl.getProps());
       if (isToMmTable != null) {
         if (!isFromMmTable && isToMmTable) {
           result = generateAddMmTasks(tbl);

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index d09b12b..b85a243 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -275,7 +275,7 @@ public class FetchOperator implements Serializable {
       FileSystem fs = currPath.getFileSystem(job);
       if (fs.exists(currPath)) {
         if (extractValidTxnList() != null &&
-            MetaStoreUtils.isInsertOnlyTable(currDesc.getTableDesc().getProperties())) {
+            AcidUtils.isInsertOnlyTable(currDesc.getTableDesc().getProperties())) {
           return true;
         }
         for (FileStatus fStat : listStatusUnderPath(fs, currPath)) {
@@ -408,7 +408,7 @@ public class FetchOperator implements Serializable {
       return StringUtils.escapeString(currPath.toString()); // No need to process here.
     }
     ValidTxnList validTxnList;
-    if (MetaStoreUtils.isInsertOnlyTable(currDesc.getTableDesc().getProperties())) {
+    if (AcidUtils.isInsertOnlyTable(currDesc.getTableDesc().getProperties())) {
       validTxnList = extractValidTxnList();
     } else {
       validTxnList = null;  // non-MM case

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 280d3cf..93b967f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -1303,7 +1303,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         } else {
           int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(),
               lbLevels = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel();
-          // TODO: why is it stored in both?
+          // TODO: why is it stored in both table and dpCtx?
           int numBuckets = (conf.getTable() != null) ? conf.getTable().getNumBuckets()
               : (dpCtx != null ? dpCtx.getNumBuckets() : 0);
           MissingBucketsContext mbc = new MissingBucketsContext(

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
index 0b8045e..9690acb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
@@ -301,7 +301,8 @@ public class HashTableSinkOperator extends TerminalOperator<HashTableSinkDesc> i
       String bigBucketFileName = getExecContext().getCurrentBigBucketFile();
       String fileName = getExecContext().getLocalWork().getBucketFileName(bigBucketFileName);
       // get the tmp URI path; it will be a hdfs path if not local mode
-      // TODO: this doesn't work... the path for writer and reader mismatch
+      // TODO [MM gap?]: this doesn't work; however, this is MR-only.
+      //      The writer and reader paths do not match:
       //      Dump the side-table for tag ... -local-10004/HashTable-Stage-1/MapJoin-a-00-(ds%3D2008-04-08)mm_2.hashtable
       //      Load back 1 hashtable file      -local-10004/HashTable-Stage-1/MapJoin-a-00-srcsortbucket3outof4.txt.hashtable
       //      Hive 3 probably won't support MR, so do we really care?

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index e644845..7cc5c30 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -266,7 +266,6 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
 
   @Override
   public int execute(DriverContext driverContext) {
-    if (work.isNoop()) return 0;
     if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
       Utilities.FILE_OP_LOGGER.trace("Executing MoveWork " + System.identityHashCode(work)
         + " with " + work.getLoadFileWork() + "; " + work.getLoadTableWork() + "; "
@@ -713,7 +712,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
 
     boolean updateBucketCols = false;
     if (bucketCols != null) {
-      // TODO: this particular bit will not work for MM tables, as there can be multiple
+      // Note: this particular bit will not work for MM tables, as there can be multiple
       //       directories for different MM IDs. We could put the path here that would account
       //       for the current MM ID being written, but it will not guarantee that other MM IDs
       //       have the correct buckets. The existing code discards the inferred data when the

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 99adf7f..c702a24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -1642,7 +1642,6 @@ public final class Utilities {
           addFilesToPathSet(taskIDToFile.values(), filesKept);
         }
 
-        // TODO: not clear why two if conditions are different. Preserve the existing logic for now.
         addBucketFileToResults(taskIDToFile, numBuckets, hconf, result);
       }
     } else {
@@ -1672,7 +1671,6 @@ public final class Utilities {
           addFilesToPathSet(taskIDToFile.values(), filesKept);
         }
       }
-      // TODO: not clear why two if conditions are different. Preserve the existing logic for now.
       addBucketFileToResults2(taskIDToFile, numBuckets, hconf, result);
     }
 
@@ -4179,7 +4177,6 @@ public final class Utilities {
     Utilities.FILE_OP_LOGGER.debug("Looking for files in: " + specPath);
     JavaUtils.IdPathFilter filter = new JavaUtils.IdPathFilter(txnId, stmtId, true);
     if (isMmCtas && !fs.exists(specPath)) {
-      // TODO: do we also need to do this when creating an empty partition from select?
       Utilities.FILE_OP_LOGGER.info("Creating table directory for CTAS with no output at " + specPath);
       FileUtils.mkdir(fs, specPath, hconf);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index a461bb9..4c0b71f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -26,6 +26,7 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Set;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -447,8 +448,6 @@ public class AcidUtils {
           case HASH_BASED_MERGE_STRING:
             obj.setHashBasedMerge(true);
             break;
-          case INSERT_ONLY_STRING:
-            obj.setInsertOnly(true);
           default:
             throw new IllegalArgumentException(
                 "Unexpected value " + option + " for ACID operational properties!");
@@ -1052,7 +1051,7 @@ public class AcidUtils {
       String deltaPrefix =
               (fn.startsWith(DELTA_PREFIX)) ? DELTA_PREFIX : DELETE_DELTA_PREFIX;
       ParsedDelta delta = parseDelta(child, deltaPrefix);
-      if (tblproperties != null && MetaStoreUtils.isInsertOnlyTable(tblproperties) &&
+      if (tblproperties != null && AcidUtils.isInsertOnlyTable(tblproperties) &&
           ValidTxnList.RangeResponse.ALL == txnList.isTxnRangeAborted(delta.minTransaction, delta.maxTransaction)) {
         aborted.add(child);
       }
@@ -1210,7 +1209,7 @@ public class AcidUtils {
   }
 
   public static boolean isFullAcidTable(Table table) {
-    return isAcidTable(table) && !MetaStoreUtils.isInsertOnlyTable(table.getParameters());
+    return isAcidTable(table) && !AcidUtils.isInsertOnlyTable(table.getParameters());
   }
 
   /**
@@ -1327,4 +1326,56 @@ public class AcidUtils {
      */
     throw new IOException(lengths + " found but is not readable.  Consider waiting or orcfiledump --recover");
   }
+
+
+  /**
+   * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE
+   * @param params table properties
+   * @return true if table is an INSERT_ONLY table, false otherwise
+   */
+  public static boolean isInsertOnlyTable(Map<String, String> params) {
+    return isInsertOnlyTable(params, false);
+  }
+
+  // TODO [MM gap]: CTAS may currently be broken. It used to work; see the old code for why isCtas isn't used.
+  public static boolean isInsertOnlyTable(Map<String, String> params, boolean isCtas) {
+    String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
+  }
+
+  public static boolean isInsertOnlyTable(Properties params) {
+    String transactionalProp = params.getProperty(
+        hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
+  }
+
+  /** Used when altering table props; may set the table to MM, non-MM, or leave MM-ness unaffected. */
+  public static Boolean isToInsertOnlyTable(Map<String, String> props) {
+    // Note: Setting these separately is a very hairy issue in certain combinations, since we
+    //       cannot decide what type of table this becomes without taking both into account, and
+    //       in many cases the conversion might be illegal.
+    //       The only thing we allow is tx = true w/o tx-props, for backward compat.
+    String transactional = props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+    String transactionalProp = props.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    if (transactional == null && transactionalProp == null) return null; // Not affected.
+    boolean isSetToTxn = "true".equalsIgnoreCase(transactional);
+    if (transactionalProp == null) {
+      if (isSetToTxn) return false; // Assume the full ACID table.
+      throw new RuntimeException("Cannot change '" + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL
+          + "' without '" + hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES + "'");
+    }
+    if (!"insert_only".equalsIgnoreCase(transactionalProp)) return false; // Not MM.
+    if (!isSetToTxn) {
+      throw new RuntimeException("Cannot set '"
+          + hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES + "' to 'insert_only' without "
+          + "setting '" + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL + "' to 'true'");
+    }
+    return true;
+  }
+
+  public static boolean isRemovedInsertOnlyTable(Set<String> removedSet) {
+    boolean hasTxn = removedSet.contains(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL),
+        hasProps = removedSet.contains(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return hasTxn || hasProps;
+  }
 }
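
For reference, the conversion matrix isToInsertOnlyTable implements, as a standalone sketch (hand-built property maps; assumes the literal key names "transactional" and "transactional_properties" that the hive_metastoreConstants constants resolve to):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class ToInsertOnlyExample {
      static Map<String, String> props(String... kv) {
        Map<String, String> m = new HashMap<>();
        for (int i = 0; i < kv.length; i += 2) {
          m.put(kv[i], kv[i + 1]);
        }
        return m;
      }

      public static void main(String[] args) {
        // Neither property in the ALTER: null, i.e. MM-ness is unaffected.
        System.out.println(AcidUtils.isToInsertOnlyTable(props()));
        // 'transactional'='true' alone: false, assumed to mean full ACID.
        System.out.println(AcidUtils.isToInsertOnlyTable(props("transactional", "true")));
        // Both set: true, the table becomes MM (insert-only).
        System.out.println(AcidUtils.isToInsertOnlyTable(props(
            "transactional", "true", "transactional_properties", "insert_only")));
        // 'insert_only' without 'transactional'='true', or a non-'true'
        // 'transactional' value alone, throws RuntimeException.
      }
    }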

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index 93de69f..6a188ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@ -118,7 +118,7 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
         TableDesc tbl = part.getTableDesc();
         boolean isMmNonMerge = false;
         if (tbl != null) {
-          isMmNonMerge = !isMerge && MetaStoreUtils.isInsertOnlyTable(tbl.getProperties());
+          isMmNonMerge = !isMerge && AcidUtils.isInsertOnlyTable(tbl.getProperties());
         } else {
           // This would be the case for obscure tasks like truncate column (unsupported for MM).
           Utilities.FILE_OP_LOGGER.warn("Assuming not insert-only; no table in partition spec " + part);

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index fba5b7e..6a1dc72 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -459,7 +459,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       TableDesc table, List<InputSplit> result)
           throws IOException {
     ValidTxnList validTxnList;
-    if (MetaStoreUtils.isInsertOnlyTable(table.getProperties())) {
+    if (AcidUtils.isInsertOnlyTable(table.getProperties())) {
       String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
       validTxnList = txnString == null ? new ValidReadTxnList() : new ValidReadTxnList(txnString);
     } else {
@@ -549,7 +549,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
         } else if (!hadAcidState) {
           AcidUtils.Directory dirInfo = AcidUtils.getAcidState(currDir, conf, validTxnList, Ref.from(false), true, null);
           hadAcidState = true;
-          // TODO: for IOW, we also need to count in base dir, if any
+          // TODO [MM gap]: for IOW, we also need to count in base dir, if any
           for (AcidUtils.ParsedDelta delta : dirInfo.getCurrentDirectories()) {
             Utilities.FILE_OP_LOGGER.debug("Adding input " + delta.getPath());
             finalPaths.add(delta.getPath());
@@ -706,9 +706,10 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       throws IOException {
     PartitionDesc partDesc = pathToPartitionInfo.get(dir);
     if (partDesc == null) {
-      // TODO: HiveFileFormatUtils.getPartitionDescFromPathRecursively for MM tables?
-      //       So far, the only case when this is called for a MM directory was in error.
-      //       Keep it like this for now; may need replacement if we find a valid usage like this.
+      // Note: we could call HiveFileFormatUtils.getPartitionDescFromPathRecursively for MM tables.
+      //       The recursive call is usually needed for non-MM tables, because their path management
+      //       is not strict and paths can be nested arbitrarily; that should not happen for MM tables.
+      //       Keep it like this for now; may need replacement if we find a valid use case.
       partDesc = pathToPartitionInfo.get(Path.getPathWithoutSchemeAndAuthority(dir));
     }
     if (partDesc == null) {
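
The fallback above retries the lookup with the scheme and authority stripped from the key; a small sketch of what that normalization produces (hypothetical path):

    import org.apache.hadoop.fs.Path;

    public class PathKeyExample {
      public static void main(String[] args) {
        Path dir = new Path("hdfs://nn:8020/warehouse/t/part=1");
        // Prints "/warehouse/t/part=1", the second key tried
        // against pathToPartitionInfo when the qualified path misses.
        System.out.println(Path.getPathWithoutSchemeAndAuthority(dir));
      }
    }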

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 5e52485..7464a29 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1676,7 +1676,7 @@ public class Hive {
       boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, Long txnId, int stmtId)
           throws HiveException {
     Path tblDataLocationPath =  tbl.getDataLocation();
-    boolean isMmTableWrite = MetaStoreUtils.isInsertOnlyTable(tbl.getParameters());
+    boolean isMmTableWrite = AcidUtils.isInsertOnlyTable(tbl.getParameters());
     try {
       // Get the partition object if it already exists
       Partition oldPart = getPartition(tbl, partSpec, false);
@@ -1722,7 +1722,7 @@ public class Hive {
       if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && (null != oldPart)) {
         newFiles = Collections.synchronizedList(new ArrayList<Path>());
       }
-      // TODO: this assumes both paths are qualified; which they are, currently.
+      // Note: this assumes both paths are qualified; which they are, currently.
       if (isMmTableWrite && loadPath.equals(newPartPath)) {
         // MM insert query, move itself is a no-op.
         if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
@@ -1870,7 +1870,7 @@ public class Hive {
     }
     PathFilter subdirFilter = null;
 
-    // TODO: just like the move path, we only do one level of recursion.
+    // Note: just like the move path, we only do one level of recursion.
     for (FileStatus src : srcs) {
       if (src.isDirectory()) {
         if (subdirFilter == null) {
@@ -1920,8 +1920,6 @@ public class Hive {
 private void walkDirTree(FileStatus fSta, FileSystem fSys,
     Map<List<String>, String> skewedColValueLocationMaps, Path newPartPath, SkewedInfo skewedInfo)
     throws IOException {
-  // TODO: may be broken? no LB bugs for now but if any are found.
-
   /* Base Case. It's leaf. */
   if (!fSta.isDir()) {
     if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
@@ -1958,7 +1956,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   List<String> skewedValue = new ArrayList<String>();
   String lbDirName = FileUtils.unescapePathName(lbdPath.toString());
   String partDirName = FileUtils.unescapePathName(newPartPath.toString());
-  String lbDirSuffix = lbDirName.replace(partDirName, ""); // TODO: wtf?
+  String lbDirSuffix = lbDirName.replace(partDirName, ""); // TODO: should this strip the prefix instead?
   if (lbDirSuffix.startsWith(Path.SEPARATOR)) {
     lbDirSuffix = lbDirSuffix.substring(1);
   }
@@ -2105,7 +2103,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     // Get all valid partition paths and existing partitions for them (if any)
     final Table tbl = getTable(tableName);
     final Set<Path> validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, txnId, stmtId,
-        MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()));
+        AcidUtils.isInsertOnlyTable(tbl.getParameters()));
 
     final int partsToLoad = validPartitions.size();
     final AtomicInteger partitionsLoaded = new AtomicInteger(0);
@@ -2238,7 +2236,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       newFiles = Collections.synchronizedList(new ArrayList<Path>());
     }
-    // TODO: this assumes both paths are qualified; which they are, currently.
+    // Note: this assumes both paths are qualified; which they are, currently.
     if (isMmTable && loadPath.equals(tbl.getPath())) {
       Utilities.FILE_OP_LOGGER.debug("not moving " + loadPath + " to " + tbl.getPath());
       if (replace) {
@@ -3797,7 +3795,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
 
       if (oldPath != null) {
-        // TODO: we assume lbLevels is 0 here. Same as old code for non-MM.
+        // Note: we assume lbLevels is 0 here. Same as old code for non-MM.
         //       For MM tables, this can only be a LOAD command. Does LOAD even support LB?
         deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTable, 0);
       }
@@ -3863,8 +3861,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
         } else {
           // We need to clean up different MM IDs from each LB directory separately.
           // Avoid temporary directories in the immediate table/part dir.
-          // TODO: we could just find directories with any MM directories inside?
-          //       the rest doesn't have to be cleaned up.
+          // Note: we could just find directories with any MM directories inside;
+          //       the rest doesn't have to be cleaned up. For now, play it safe.
           String mask = "[^._]*";
           for (int i = 0; i < lbLevels - 1; ++i) {
             mask += Path.SEPARATOR + "*";
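
The mask built in this loop matches one level of non-temporary names (leading '.' and '_' excluded) plus one wildcard level per remaining LB column; a standalone sketch with a hypothetical lbLevels value:

    import org.apache.hadoop.fs.Path;

    public class LbMaskExample {
      public static void main(String[] args) {
        int lbLevels = 2; // hypothetical: two list-bucketing columns
        String mask = "[^._]*"; // skip names starting with '.' or '_'
        for (int i = 0; i < lbLevels - 1; ++i) {
          mask += Path.SEPARATOR + "*";
        }
        System.out.println(mask); // prints "[^._]*/*"
      }
    }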

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index ee8c249..d795a19 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -248,7 +248,7 @@ public class JsonMetaDataFormatter implements MetaDataFormatter {
    * @param tblPath not NULL
    * @throws IOException
    */
-  // Duplicates logic in TextMetaDataFormatter TODO: wtf?!!
+  // Duplicates logic in TextMetaDataFormatter
   private void putFileSystemsStats(MapBuilder builder, List<Path> locations,
       HiveConf conf, Path tblPath)
           throws IOException {

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index 3c25896..5a18e33 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -318,7 +318,8 @@ class TextMetaDataFormatter implements MetaDataFormatter {
     public int numOfFiles = 0;
   }
 
-  // TODO: why is this in text formatter? grrr
+  // TODO: why is this in the text formatter?
+  //       This computes stats and should live in the stats code (and be de-duplicated).
   private void writeFileSystemStats(DataOutputStream outStream,
       HiveConf conf,
       List<Path> locations,

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
index 66ea4e2..c62d98f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
@@ -264,7 +265,7 @@ abstract public class AbstractBucketJoinProc implements NodeProcessor {
       }
 
       Table tbl = tso.getConf().getTableMetadata();
-      if (MetaStoreUtils.isInsertOnlyTable(tbl.getParameters())) {
+      if (AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
         Utilities.FILE_OP_LOGGER.debug("No bucketed join on MM table " + tbl.getTableName());
         return false;
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index 85dcdd7..fc1d4f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -415,7 +415,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
           /*ACID tables have complex directory layout and require merging of delta files
           * on read thus we should not try to read bucket files directly*/
           return null;
-        } else if (MetaStoreUtils.isInsertOnlyTable(tab.getParameters())) {
+        } else if (AcidUtils.isInsertOnlyTable(tab.getParameters())) {
           // Do not support MM tables either at this point. We could do it with some extra logic.
           return null;
         }
@@ -510,7 +510,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
             TableScanOperator ts = (TableScanOperator) op;
             Table srcTable = ts.getConf().getTableMetadata();
             // Not supported for MM tables for now.
-            if (MetaStoreUtils.isInsertOnlyTable(destTable.getParameters())) {
+            if (AcidUtils.isInsertOnlyTable(destTable.getParameters())) {
               return null;
             }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 9ffda7e..0d13a51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1372,12 +1372,6 @@ public final class GenMapRedUtils {
       dummyMv = new MoveWork(null, null, null,
           new LoadFileDesc(inputDirName, finalName, true, null, null, false), false,
           SessionState.get().getLineageState());
-    } else {
-      // TODO# noop MoveWork to avoid q file changes in HIVE-14990. Remove (w/the flag) after merge.
-      dummyMv = new MoveWork(null, null, null,
-          new LoadFileDesc(inputDirName, finalName, true, null, null, false), false,
-          SessionState.get().getLineageState());
-      dummyMv.setNoop(true);
     }
     // Use the original fsOp path here in case of MM - while the new FSOP merges files inside the
     // MM directory, the original MoveTask still commits based on the parent. Note that this path
@@ -1756,7 +1750,7 @@ public final class GenMapRedUtils {
     // Create a dummy task if no move is needed.
     Serializable moveWork = mvWork != null ? mvWork : new DependencyCollectionWork();
 
-    // TODO: this should never happen for mm tables.
+    // Note: this should never happen for mm tables.
     boolean shouldMergeMovePaths = (moveTaskToLink != null && dependencyTask == null
         && shouldMergeMovePaths(conf, condInputPath, condOutputPath, moveTaskToLink.getWork()));
 
@@ -1840,7 +1834,6 @@ public final class GenMapRedUtils {
     // find the move task
     for (Task<MoveWork> mvTsk : mvTasks) {
       MoveWork mvWork = mvTsk.getWork();
-      if (mvWork.isNoop()) continue;
       Path srcDir = null;
       boolean isLfd = false;
       if (mvWork.getLoadFileWork() != null) {
@@ -1878,7 +1871,7 @@ public final class GenMapRedUtils {
     MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTaskForFsopOutput(
         mvTasks, fsOp.getConf().getFinalDirName(), fsOp.getConf().isMmTable());
 
-    // TODO: wtf? wtf?!! why is this in this method?
+    // TODO: why is this in this method? It is unrelated to the rest of the method.
     if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)
         && !fsOp.getConf().isMaterialization()) {
       // mark the MapredWork and FileSinkOperator for gathering stats

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
index 9d2e031..b5891ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
@@ -191,8 +192,8 @@ public class SamplePruner extends Transform {
     String fullScanMsg = "";
 
     // check if input pruning is possible
-    // TODO: this relies a lot on having one file per bucket. No support for MM tables for now.
-    boolean isMmTable = MetaStoreUtils.isInsertOnlyTable(part.getTable().getParameters());
+    // TODO: this code is buggy; it relies on having one file per bucket. No MM support (by design).
+    boolean isMmTable = AcidUtils.isInsertOnlyTable(part.getTable().getParameters());
     if (sampleDescr.getInputPruning() && !isMmTable) {
       LOG.trace("numerator = " + num);
       LOG.trace("denominator = " + den);

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java
index 5b89059..b28315a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -66,7 +67,7 @@ public class SamplingOptimizer implements PhysicalPlanResolver {
       if (tbl == null) {
         continue;
       }
-      if (MetaStoreUtils.isInsertOnlyTable(tbl.getParameters())) {
+      if (AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
         // Not supported for MM tables - sampler breaks separate MM dirs into splits, resulting in
         // mismatch when the downstream task looks at them again assuming they are MM table roots.
         // We could somehow unset the MM flag for the main job when the sampler succeeds, since the

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 87e4ee8..68f7b47 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -1005,7 +1005,7 @@ public class Vectorizer implements PhysicalPlanResolver {
           setOperatorIssue("Alias " + alias + " not present in aliases " + aliases);
           return new ImmutablePair<Boolean,Boolean>(false, false);
         }
-        // TODO: should this use getPartitionDescFromPathRecursively?
+        // TODO: should this use getPartitionDescFromPathRecursively? That's what other code uses.
         PartitionDesc partDesc = pathToPartitionInfo.get(path);
         if (partDesc.getVectorPartitionDesc() != null) {
           // We've seen this already.

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 36bb89f..cbb422f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -994,7 +994,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
           throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
         }
         // It would be possible to support this, but this is such a pointless command.
-        if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
+        if (AcidUtils.isInsertOnlyTable(table.getParameters())) {
           throw new SemanticException("Truncating MM table columns not presently supported");
         }
 
@@ -1639,7 +1639,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     try {
       tblObj = getTable(tableName);
       // TODO: we should probably block all ACID tables here.
-      if (MetaStoreUtils.isInsertOnlyTable(tblObj.getParameters())) {
+      if (AcidUtils.isInsertOnlyTable(tblObj.getParameters())) {
         throw new SemanticException("Merge is not supported for MM tables");
       }
       mergeDesc.setTableDesc(Utilities.getTableDesc(tblObj));

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 41b5156..9afeadb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -134,7 +134,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
             parsedTableName = dbTablePair.getValue();
             // get partition metadata if partition specified
             if (child.getChildCount() == 2) {
-              @SuppressWarnings("unused") // TODO: wtf?
+              @SuppressWarnings("unused")
               ASTNode partspec = (ASTNode) child.getChild(1);
               isPartSpecSet = true;
               parsePartitionSpec(child, parsedPartSpec);
@@ -233,7 +233,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     } catch (Exception e) {
       throw new HiveException(e);
     }
-    boolean isSourceMm = MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps());
+    boolean isSourceMm = AcidUtils.isInsertOnlyTable(tblDesc.getTblProps());
 
     if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){
       tblDesc.setReplicationSpec(replicationSpec);
@@ -313,7 +313,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     Long txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
     int stmtId = 0;
-    // TODO: due to the master merge, tblDesc is no longer CreateTableDesc, but ImportTableDesc
+    // TODO [MM gap?]: bad merge; tblDesc is no longer CreateTableDesc, but ImportTableDesc.
+    //                 We need to verify the tests to see if this works correctly.
     /*
     if (isAcid(txnId)) {
       tblDesc.setInitialMmWriteId(txnId);
@@ -365,7 +366,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x,
       Long txnId, int stmtId, boolean isSourceMm) {
     Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
-    Path destPath = !MetaStoreUtils.isInsertOnlyTable(table.getParameters()) ? x.getCtx().getExternalTmpPath(tgtPath)
+    Path destPath = !AcidUtils.isInsertOnlyTable(table.getParameters()) ? x.getCtx().getExternalTmpPath(tgtPath)
         : new Path(tgtPath, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
     if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
       Utilities.FILE_OP_LOGGER.trace("adding import work for table with source location: " +
@@ -376,7 +377,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     Task<?> copyTask = null;
     if (replicationSpec.isInReplicationScope()) {
       if (isSourceMm || isAcid(txnId)) {
-        // TODO: ReplCopyTask is completely screwed. Need to support when it's not as screwed.
+        // Note: this is a replication gap, not an MM gap; Repl V2 is not ready yet.
         throw new RuntimeException("Replicating MM and ACID tables is not supported");
       }
       copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf());
@@ -457,9 +458,9 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           + partSpecToString(partSpec.getPartSpec())
           + " with source location: " + srcLocation);
       Path tgtLocation = new Path(partSpec.getLocation());
-      Path destPath = !MetaStoreUtils.isInsertOnlyTable(table.getParameters()) ? x.getCtx().getExternalTmpPath(tgtLocation)
+      Path destPath = !AcidUtils.isInsertOnlyTable(table.getParameters()) ? x.getCtx().getExternalTmpPath(tgtLocation)
           : new Path(tgtLocation, AcidUtils.deltaSubdir(txnId, txnId, stmtId));
-      Path moveTaskSrc =  !MetaStoreUtils.isInsertOnlyTable(table.getParameters()) ? destPath : tgtLocation;
+      Path moveTaskSrc =  !AcidUtils.isInsertOnlyTable(table.getParameters()) ? destPath : tgtLocation;
       if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
         Utilities.FILE_OP_LOGGER.trace("adding import work for partition with source location: "
           + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm "
@@ -470,7 +471,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       Task<?> copyTask = null;
       if (replicationSpec.isInReplicationScope()) {
         if (isSourceMm || isAcid(txnId)) {
-          // TODO: ReplCopyTask is completely screwed. Need to support when it's not as screwed.
+          // Note: this is a replication gap, not an MM gap; Repl V2 is not ready yet.
           throw new RuntimeException("Replicating MM and ACID tables is not supported");
         }
         copyTask = ReplCopyTask.getLoadCopyTask(
@@ -792,7 +793,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         x.getLOG().debug("table partitioned");
         Task<?> ict = createImportCommitTask(
             table.getDbName(), table.getTableName(), txnId, stmtId, x.getConf(),
-            MetaStoreUtils.isInsertOnlyTable(table.getParameters()));
+            AcidUtils.isInsertOnlyTable(table.getParameters()));
 
         for (AddPartitionDesc addPartitionDesc : partitionDescs) {
           Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
@@ -829,7 +830,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       if (isPartitioned(tblDesc)) {
         Task<?> ict = createImportCommitTask(
             tblDesc.getDatabaseName(), tblDesc.getTableName(), txnId, stmtId, x.getConf(),
-            MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps()));
+            AcidUtils.isInsertOnlyTable(tblDesc.getTblProps()));
         for (AddPartitionDesc addPartitionDesc : partitionDescs) {
           t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc,
             replicationSpec, x, txnId, stmtId, isSourceMm, ict));
@@ -956,7 +957,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         if (isPartitioned(tblDesc)) {
           Task<?> ict = createImportCommitTask(
               tblDesc.getDatabaseName(), tblDesc.getTableName(), txnId, stmtId, x.getConf(),
-              MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps()));
+              AcidUtils.isInsertOnlyTable(tblDesc.getTblProps()));
           for (AddPartitionDesc addPartitionDesc : partitionDescs) {
             addPartitionDesc.setReplicationSpec(replicationSpec);
             t.addDependentTask(
@@ -982,7 +983,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
           Task<?> ict = replicationSpec.isMetadataOnly() ? null : createImportCommitTask(
               tblDesc.getDatabaseName(), tblDesc.getTableName(), txnId, stmtId, x.getConf(),
-              MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps()));
+              AcidUtils.isInsertOnlyTable(tblDesc.getTblProps()));
           if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) {
             if (!replicationSpec.isMetadataOnly()){
               x.getTasks().add(addSinglePartition(

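The recurring change above routes MM (insert-only) imports straight into a per-transaction delta directory under the target instead of an external staging path. A minimal standalone sketch of that decision, with isInsertOnlyTable and deltaSubdir reimplemented as simplified stand-ins for the real AcidUtils helpers (the stand-ins are assumptions for illustration, not the actual Hive implementations):

import java.util.Map;

public class MmDestPathSketch {
  // Simplified stand-in for AcidUtils.isInsertOnlyTable(Map<String, String>):
  // an MM table is transactional with transactional_properties=insert_only.
  static boolean isInsertOnlyTable(Map<String, String> params) {
    return "true".equalsIgnoreCase(params.getOrDefault("transactional", ""))
        && "insert_only".equalsIgnoreCase(
            params.getOrDefault("transactional_properties", ""));
  }

  // Simplified stand-in for AcidUtils.deltaSubdir(min, max, stmtId), which
  // names delta directories delta_<min>_<max>_<stmtId> with zero padding.
  static String deltaSubdir(long minTxn, long maxTxn, int stmtId) {
    return String.format("delta_%07d_%07d_%04d", minTxn, maxTxn, stmtId);
  }

  // Mirrors the destPath selection in addSinglePartition: MM imports land
  // directly in a delta dir under the target; others stage in a tmp path.
  static String chooseDestPath(Map<String, String> tableParams, String tgtPath,
      String externalTmpPath, long txnId, int stmtId) {
    return isInsertOnlyTable(tableParams)
        ? tgtPath + "/" + deltaSubdir(txnId, txnId, stmtId)
        : externalTmpPath;
  }
}

For example, chooseDestPath(mmParams, "/warehouse/t", "/tmp/stage", 17, 0) would return /warehouse/t/delta_0000017_0000017_0000, so no later move out of a staging directory is needed.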
http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index ab2b65f..51efde1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -228,7 +228,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
           + " and use 'insert... select' to allow Hive to enforce bucketing. " + error);
     }
 
-    if(AcidUtils.isAcidTable(ts.tableHandle) && !MetaStoreUtils.isInsertOnlyTable(ts.tableHandle.getParameters())) {
+    if (AcidUtils.isAcidTable(ts.tableHandle) && !AcidUtils.isInsertOnlyTable(ts.tableHandle.getParameters())) {
       throw new SemanticException(ErrorMsg.LOAD_DATA_ON_ACID_TABLE, ts.tableHandle.getCompleteName());
     }
     // make sure the arguments make sense
@@ -277,7 +277,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
     Long txnId = null;
     int stmtId = 0;
     Table tbl = ts.tableHandle;
-    if (MetaStoreUtils.isInsertOnlyTable(tbl.getParameters())) {
+    if (AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
       txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
     }
 

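The LoadSemanticAnalyzer change enforces the same split: LOAD DATA is rejected on full ACID tables but allowed on MM tables, which instead pick up the current transaction id. A hedged sketch of that gate (the property checks are simplified stand-ins for the AcidUtils helpers, and the exception type is illustrative):

import java.util.Map;

public class LoadDataGateSketch {
  static boolean isTransactional(Map<String, String> p) {
    return "true".equalsIgnoreCase(p.getOrDefault("transactional", ""));
  }

  static boolean isInsertOnly(Map<String, String> p) {
    return isTransactional(p) && "insert_only".equalsIgnoreCase(
        p.getOrDefault("transactional_properties", ""));
  }

  // Returns the txn id the load should write under, or null for plain tables.
  static Long checkLoad(Map<String, String> tableParams, long currentTxnId) {
    if (isTransactional(tableParams) && !isInsertOnly(tableParams)) {
      // Mirrors the ErrorMsg.LOAD_DATA_ON_ACID_TABLE rejection above.
      throw new IllegalArgumentException("LOAD DATA is not allowed on full ACID tables");
    }
    // MM loads need a txn id so the files land in a delta_x_x_xxxx directory.
    return isInsertOnly(tableParams) ? currentTxnId : null;
  }
}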
http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f3f932b..99b8712 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6837,7 +6837,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
 
       boolean isNonNativeTable = dest_tab.isNonNative();
-      isMmTable = MetaStoreUtils.isInsertOnlyTable(dest_tab.getParameters());
+      isMmTable = AcidUtils.isInsertOnlyTable(dest_tab.getParameters());
       if (isNonNativeTable || isMmTable) {
         queryTmpdir = dest_path;
       } else {
@@ -6873,7 +6873,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
           checkAcidConstraints(qb, table_desc, dest_tab);
         }
-        if (MetaStoreUtils.isInsertOnlyTable(table_desc.getProperties())) {
+        if (AcidUtils.isInsertOnlyTable(table_desc.getProperties())) {
           acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
         }
         if (isMmTable) {
@@ -6924,7 +6924,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
           .getAuthority(), partPath.toUri().getPath());
 
-      isMmTable = MetaStoreUtils.isInsertOnlyTable(dest_tab.getParameters());
+      isMmTable = AcidUtils.isInsertOnlyTable(dest_tab.getParameters());
       queryTmpdir = isMmTable ? dest_path : ctx.getTempDirForPath(dest_path, true);
       if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
         Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying "
@@ -6947,7 +6947,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
         checkAcidConstraints(qb, table_desc, dest_tab);
       }
-      if (MetaStoreUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) {
+      if (AcidUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) {
         acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
       }
       if (isMmTable) {
@@ -6990,7 +6990,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         field_schemas = new ArrayList<FieldSchema>();
         destTableIsTemporary = tblDesc.isTemporary();
         destTableIsMaterialization = tblDesc.isMaterialization();
-        if (!destTableIsTemporary && MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
+        if (!destTableIsTemporary && AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
           isMmTable = isMmCtas = true;
           txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
           tblDesc.setInitialMmWriteId(txnId);
@@ -7253,8 +7253,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // If this is an insert, update, or delete on an ACID table then mark that so the
     // FileSinkOperator knows how to properly write to it.
     boolean isDestInsertOnly = (dest_part != null && dest_part.getTable() != null &&
-        MetaStoreUtils.isInsertOnlyTable(dest_part.getTable().getParameters()))
-        || (table_desc != null && MetaStoreUtils.isInsertOnlyTable(table_desc.getProperties()));
+        AcidUtils.isInsertOnlyTable(dest_part.getTable().getParameters()))
+        || (table_desc != null && AcidUtils.isInsertOnlyTable(table_desc.getProperties()));
 
     if (isDestInsertOnly) {
       fileSinkDesc.setWriteType(Operation.INSERT);
@@ -12056,7 +12056,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (p != null) {
         tbl = p.getTable();
       }
-      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()))) {
+      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || AcidUtils.isInsertOnlyTable(tbl.getParameters()))) {
         acidInQuery = true;
         checkAcidTxnManager(tbl);
       }
@@ -12119,7 +12119,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         tbl = writeEntity.getTable();
       }
 
-      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()))) {
+      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || AcidUtils.isInsertOnlyTable(tbl.getParameters()))) {
         acidInQuery = true;
         checkAcidTxnManager(tbl);
       }

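Across the SemanticAnalyzer hunks the pattern is the same: once a destination is known to be MM, the query writes directly to the final location rather than a scratch dir, and the ACID operation type is forced for insert-only sinks. Condensed into a hedged sketch (names and the enum are illustrative placeholders, not the planner's real types):

public class MmSinkPlanningSketch {
  enum AcidOperation { NOT_ACID, INSERT }

  // Mirrors the queryTmpdir selection: non-native and MM tables skip the
  // staging directory because their files need no final rename/move step.
  static String chooseQueryTmpDir(boolean isNonNativeTable, boolean isMmTable,
      String destPath, String scratchDirForDest) {
    return (isNonNativeTable || isMmTable) ? destPath : scratchDirForDest;
  }

  // Simplified to the insert path: full-ACID writes can also be UPDATE or
  // DELETE, but insert-only destinations are always treated as ACID inserts.
  static AcidOperation chooseAcidOp(boolean destIsFullAcid, boolean destIsInsertOnly) {
    return (destIsFullAcid || destIsInsertOnly)
        ? AcidOperation.INSERT : AcidOperation.NOT_ACID;
  }
}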
http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 4732f0a..ea8fc19 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -283,9 +283,9 @@ public class FileSinkDesc extends AbstractOperatorDesc {
 
   public boolean isMmTable() {
     if (getTable() != null) {
-      return MetaStoreUtils.isInsertOnlyTable(table.getParameters());
+      return AcidUtils.isInsertOnlyTable(table.getParameters());
     } else { // Dynamic Partition Insert case
-      return MetaStoreUtils.isInsertOnlyTable(getTableInfo().getProperties());
+      return AcidUtils.isInsertOnlyTable(getTableInfo().getProperties());
     }
   }
 

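The FileSinkDesc change needs two lookups because a dynamic-partition insert has no resolved Table object at this point, only the serialized TableDesc properties. A hedged sketch of that fallback (stand-in types and property names; the real check is AcidUtils.isInsertOnlyTable on either source):

import java.util.Map;
import java.util.Properties;

public class FileSinkMmCheckSketch {
  static boolean isInsertOnly(String transactional, String transactionalProps) {
    return "true".equalsIgnoreCase(transactional == null ? "" : transactional)
        && "insert_only".equalsIgnoreCase(
            transactionalProps == null ? "" : transactionalProps);
  }

  // resolvedTableParams is null in the dynamic-partition insert case, where
  // only the serialized TableDesc properties are available to the sink.
  static boolean isMmSink(Map<String, String> resolvedTableParams, Properties tableDescProps) {
    if (resolvedTableParams != null) {
      return isInsertOnly(resolvedTableParams.get("transactional"),
          resolvedTableParams.get("transactional_properties"));
    }
    return isInsertOnly(tableDescProps.getProperty("transactional"),
        tableDescProps.getProperty("transactional_properties"));
  }
}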
http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index 0b7fe01..f246115 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -42,7 +42,7 @@ public class LoadTableDesc extends LoadDesc implements Serializable {
   private int stmtId;
   private Long currentTransactionId;
 
-  // TODO: the below seems like they should just be combined into partitionDesc
+  // TODO: the fields below seem like they should just be combined into partitionDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
   private Map<String, String> partitionSpec; // NOTE: this partitionSpec has to be ordered map
 
@@ -162,7 +162,7 @@ public class LoadTableDesc extends LoadDesc implements Serializable {
   }
 
   public boolean isMmTable() {
-    return MetaStoreUtils.isInsertOnlyTable(table.getProperties());
+    return AcidUtils.isInsertOnlyTable(table.getProperties());
   }
 
   public void setReplace(boolean replace) {

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
index 17d2f31..28a3374 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
@@ -165,14 +165,6 @@ public class MoveWork implements Serializable {
   public void setSrcLocal(boolean srcLocal) {
     this.srcLocal = srcLocal;
   }
-
-  public void setNoop(boolean b) {
-    this.isNoop = true;
-  }
-
-  public boolean isNoop() {
-    return this.isNoop;
-  }
   
   public LineageState getLineagState() {
     return sessionStateLineageState;

http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 8b3ad75..7d4d379 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -210,7 +210,7 @@ public class CompactorMR {
 
     // For MM tables we don't need to launch MR jobs as there is no compaction needed.
     // We just need to delete the directories for aborted transactions.
-    if (MetaStoreUtils.isInsertOnlyTable(t.getParameters())) {
+    if (AcidUtils.isInsertOnlyTable(t.getParameters())) {
       LOG.debug("Going to delete directories for aborted transactions for MM table "
           + t.getDbName() + "." + t.getTableName());
       removeFiles(conf, sd.getLocation(), txns, t);

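The CompactorMR change short-circuits before any MR job is built: insert-only tables have no delete deltas to merge, so no compaction job is needed, and the only work is removing directories written by aborted transactions. A hedged sketch of that control flow (the cleaner interface is a hypothetical stand-in for the removeFiles call above):

import java.util.Map;

public class MmCompactionSketch {
  // Hypothetical stand-in for CompactorMR.removeFiles(conf, location, txns, t).
  interface AbortedDirCleaner {
    void removeAbortedDirs(String tableLocation);
  }

  // Simplified stand-in for AcidUtils.isInsertOnlyTable (the real helper
  // also verifies transactional=true).
  static boolean isInsertOnly(Map<String, String> params) {
    return "insert_only".equalsIgnoreCase(
        params.getOrDefault("transactional_properties", ""));
  }

  // Returns true if an MR compaction job was actually launched.
  static boolean compact(Map<String, String> tableParams, String tableLocation,
      AbortedDirCleaner cleaner, Runnable launchMrJob) {
    if (isInsertOnly(tableParams)) {
      // No delete deltas to merge; just drop aborted-transaction directories.
      cleaner.removeAbortedDirs(tableLocation);
      return false;
    }
    launchMrJob.run();
    return true;
  }
}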
http://git-wip-us.apache.org/repos/asf/hive/blob/64015972/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index c52bd3e..8daa5c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -253,7 +253,7 @@ public class Initiator extends CompactorThread {
                                                  StorageDescriptor sd, Map<String, String> tblproperties)
       throws IOException, InterruptedException {
 
-    if (MetaStoreUtils.isInsertOnlyTable(tblproperties)) {
+    if (AcidUtils.isInsertOnlyTable(tblproperties)) {
       return CompactionType.MINOR;
     }
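
The Initiator mirrors the same policy from the scheduling side: an insert-only table is pinned to MINOR before the usual delta-size heuristics run, since the MM "compaction" above only cleans up aborted directories regardless of type. A hedged sketch (the threshold logic is an illustrative placeholder for the real checkForCompaction heuristics):

import java.util.Map;

public class MmInitiatorSketch {
  enum CompactionType { MINOR, MAJOR }

  static CompactionType determineCompactionType(Map<String, String> tblproperties,
      long deltaSize, long baseSize, float majorDeltaPctThreshold) {
    // Simplified stand-in for AcidUtils.isInsertOnlyTable on tblproperties.
    if ("insert_only".equalsIgnoreCase(
        tblproperties.getOrDefault("transactional_properties", ""))) {
      return CompactionType.MINOR;
    }
    // Illustrative placeholder for the real base/delta size-ratio heuristic.
    return (baseSize > 0 && (float) deltaSize / baseSize > majorDeltaPctThreshold)
        ? CompactionType.MAJOR : CompactionType.MINOR;
  }
}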