Posted to commits@hive.apache.org by se...@apache.org on 2018/07/23 18:51:51 UTC

[11/13] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0723

HIVE-19416 : merge master into branch (Sergey Shelukhin) 0723


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d2c60f3a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d2c60f3a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d2c60f3a

Branch: refs/heads/master-txnstats
Commit: d2c60f3ae18fb18b1bc50355a7740cf352cab782
Parents: e8d7cdc 90d19ac
Author: sergey <se...@apache.org>
Authored: Mon Jul 23 11:22:33 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Mon Jul 23 11:22:33 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |    2 +
 .../scripts/upgrade/derby/upgrade.order.derby   |    2 +
 .../scripts/upgrade/mssql/upgrade.order.mssql   |    2 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |    2 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |    2 +
 .../upgrade/postgres/upgrade.order.postgres     |    2 +
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   46 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   16 +-
 .../exec/spark/SparkDynamicPartitionPruner.java |   25 +-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |   24 +
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  211 ++
 .../hadoop/hive/ql/io/HiveInputFormat.java      |   12 -
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |  221 +--
 .../apache/hadoop/hive/ql/metadata/Table.java   |    4 +
 .../calcite/translator/RexNodeConverter.java    |    2 +-
 .../hive/ql/parse/ExplainConfiguration.java     |    8 +
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    2 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    1 +
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |   17 +
 .../hive/ql/udf/generic/GenericUDFIn.java       |    2 +-
 .../hive/ql/udf/generic/GenericUDFUtils.java    |   57 +-
 ql/src/test/queries/clientpositive/bucket7.q    |    2 +
 .../test/queries/clientpositive/explain_locks.q |   22 +
 .../queries/clientpositive/orc_ppd_decimal.q    |   16 +-
 .../clientpositive/parquet_ppd_decimal.q        |   16 +-
 .../vectorization_parquet_ppd_decimal.q         |   16 +-
 .../results/clientpositive/explain_locks.q.out  |   91 +
 .../clientpositive/llap/orc_ppd_decimal.q.out   |   48 +-
 .../clientpositive/parquet_ppd_decimal.q.out    |   80 +-
 .../vectorization_parquet_ppd_decimal.q.out     |   80 +-
 .../server/ThreadWithGarbageCleanup.java        |    6 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    8 +-
 .../hive/metastore/LockComponentBuilder.java    |    5 +
 .../hive/metastore/LockRequestBuilder.java      |   17 +
 .../hadoop/hive/metastore/ObjectStore.java      |    7 +-
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   15 -
 .../main/sql/derby/hive-schema-3.2.0.derby.sql  |  720 +++++++
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |    2 +-
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |   16 -
 .../sql/derby/upgrade-3.1.0-to-3.2.0.derby.sql  |   20 +
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |    8 -
 .../sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql  |    8 +
 .../src/main/sql/derby/upgrade.order.derby      |    3 +-
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   17 -
 .../main/sql/mssql/hive-schema-3.2.0.mssql.sql  | 1284 ++++++++++++
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |    2 +-
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |   17 -
 .../sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql  |   23 +
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |   10 -
 .../sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql  |   10 +
 .../src/main/sql/mssql/upgrade.order.mssql      |    3 +-
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   16 -
 .../main/sql/mysql/hive-schema-3.2.0.mysql.sql  | 1218 ++++++++++++
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |    2 +-
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |   15 -
 .../sql/mysql/upgrade-3.1.0-to-3.2.0.mysql.sql  |   22 +
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |   10 -
 .../sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql  |   10 +
 .../src/main/sql/mysql/upgrade.order.mysql      |    3 +-
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   16 -
 .../sql/oracle/hive-schema-3.2.0.oracle.sql     | 1175 +++++++++++
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |    2 +-
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |   16 -
 .../oracle/upgrade-3.1.0-to-3.2.0.oracle.sql    |   22 +
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |    9 -
 .../oracle/upgrade-3.2.0-to-4.0.0.oracle.sql    |    9 +
 .../src/main/sql/oracle/upgrade.order.oracle    |    3 +-
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   16 -
 .../sql/postgres/hive-schema-3.2.0.postgres.sql | 1866 ++++++++++++++++++
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |    2 +-
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |   16 -
 .../upgrade-3.1.0-to-3.2.0.postgres.sql         |   22 +
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |   10 -
 .../upgrade-3.2.0-to-4.0.0.postgres.sql         |   10 +
 .../main/sql/postgres/upgrade.order.postgres    |    3 +-
 75 files changed, 7218 insertions(+), 507 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 183d690,a9983b0..7818efb
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@@ -32,7 -32,7 +32,8 @@@ import java.util.Properties
  import java.util.Set;
  import java.util.regex.Pattern;
  
 +import org.apache.avro.generic.GenericData;
+ import com.google.common.base.Preconditions;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FSDataOutputStream;
@@@ -40,23 -40,31 +41,31 @@@ import org.apache.hadoop.fs.FileStatus
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.PathFilter;
 -import org.apache.hadoop.hive.common.HiveStatsUtils;
 -import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 -import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 -import org.apache.hadoop.hive.common.ValidWriteIdList;
 +import org.apache.hadoop.hive.common.*;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.LockComponentBuilder;
  import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 +import org.apache.hadoop.hive.metastore.Warehouse;
  import org.apache.hadoop.hive.metastore.api.DataOperationType;
+ import org.apache.hadoop.hive.metastore.api.LockComponent;
+ import org.apache.hadoop.hive.metastore.api.LockType;
  import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
  import org.apache.hadoop.hive.ql.ErrorMsg;
  import org.apache.hadoop.hive.ql.exec.Utilities;
+ import org.apache.hadoop.hive.ql.hooks.Entity;
+ import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+ import org.apache.hadoop.hive.ql.hooks.WriteEntity;
  import org.apache.hadoop.hive.ql.io.AcidUtils.ParsedDelta;
  import org.apache.hadoop.hive.ql.io.orc.OrcFile;
  import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
  import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
  import org.apache.hadoop.hive.ql.io.orc.Reader;
  import org.apache.hadoop.hive.ql.io.orc.Writer;
 +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 +import org.apache.hadoop.hive.ql.lockmgr.LockException;
+ import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
  import org.apache.hadoop.hive.ql.metadata.Table;
  import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
  import org.apache.hadoop.hive.ql.plan.TableScanDesc;
@@@ -2114,4 -1993,201 +2123,206 @@@ public class AcidUtils 
      tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "false");
      tblProps.remove(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
    }
+ 
+   private static boolean needsLock(Entity entity) {
+     switch (entity.getType()) {
+     case TABLE:
+       return isLockableTable(entity.getTable());
+     case PARTITION:
+       return isLockableTable(entity.getPartition().getTable());
+     default:
+       return true;
+     }
+   }
+ 
+   private static Table getTable(WriteEntity we) {
+     Table t = we.getTable();
+     if (t == null) {
+       throw new IllegalStateException("No table info for " + we);
+     }
+     return t;
+   }
+ 
+   private static boolean isLockableTable(Table t) {
+     if (t.isTemporary()) {
+       return false;
+     }
+     switch (t.getTableType()) {
+     case MANAGED_TABLE:
+     case MATERIALIZED_VIEW:
+       return true;
+     default:
+       return false;
+     }
+   }
+ 
+   /**
+    * Create lock components from write/read entities.
+    * @param outputs write entities
+    * @param inputs read entities
+    * @param conf
+    * @return list with lock components
+    */
+   public static List<LockComponent> makeLockComponents(Set<WriteEntity> outputs, Set<ReadEntity> inputs,
+       HiveConf conf) {
+     List<LockComponent> lockComponents = new ArrayList<>();
+     // For each source to read, get a shared lock
+     for (ReadEntity input : inputs) {
+       if (!input.needsLock() || input.isUpdateOrDelete() || !AcidUtils.needsLock(input)) {
+         // We don't want to acquire read locks during update or delete as we'll be acquiring write
+         // locks instead. Also, there's no need to lock temp tables since they're session wide
+         continue;
+       }
+       LockComponentBuilder compBuilder = new LockComponentBuilder();
+       compBuilder.setShared();
+       compBuilder.setOperationType(DataOperationType.SELECT);
+ 
+       Table t = null;
+       switch (input.getType()) {
+       case DATABASE:
+         compBuilder.setDbName(input.getDatabase().getName());
+         break;
+ 
+       case TABLE:
+         t = input.getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       case PARTITION:
+       case DUMMYPARTITION:
+         compBuilder.setPartitionName(input.getPartition().getName());
+         t = input.getPartition().getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       default:
+         // This is a file or something we don't hold locks for.
+         continue;
+       }
+       if (t != null) {
+         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
+       }
+       LockComponent comp = compBuilder.build();
+       LOG.debug("Adding lock component to lock request " + comp.toString());
+       lockComponents.add(comp);
+     }
+     // For each source to write to, get the appropriate lock type.  If it's
+     // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
+     // overwrite) then we need a shared lock.  If it's update or delete then we
+     // need a SEMI-SHARED.
+     for (WriteEntity output : outputs) {
+       LOG.debug("output is null " + (output == null));
+       if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils
+           .needsLock(output)) {
+         // We don't lock files or directories. We also skip locking temp tables.
+         continue;
+       }
+       LockComponentBuilder compBuilder = new LockComponentBuilder();
+       Table t = null;
+       switch (output.getType()) {
+       case DATABASE:
+         compBuilder.setDbName(output.getDatabase().getName());
+         break;
+ 
+       case TABLE:
+       case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
+         t = output.getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       case PARTITION:
+         compBuilder.setPartitionName(output.getPartition().getName());
+         t = output.getPartition().getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       default:
+         // This is a file or something we don't hold locks for.
+         continue;
+       }
+       switch (output.getWriteType()) {
+         /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
+          Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
+          makes sense everywhere).  This however would be problematic for merge...*/
+       case DDL_EXCLUSIVE:
+         compBuilder.setExclusive();
+         compBuilder.setOperationType(DataOperationType.NO_TXN);
+         break;
+       case INSERT_OVERWRITE:
+         t = AcidUtils.getTable(output);
+         if (AcidUtils.isTransactionalTable(t)) {
+           if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
+             compBuilder.setExclusive();
+           } else {
+             compBuilder.setSemiShared();
+           }
+           compBuilder.setOperationType(DataOperationType.UPDATE);
+         } else {
+           compBuilder.setExclusive();
+           compBuilder.setOperationType(DataOperationType.NO_TXN);
+         }
+         break;
+       case INSERT:
+         assert t != null;
+         if (AcidUtils.isTransactionalTable(t)) {
+           compBuilder.setShared();
+         } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
+           final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
+               "Thought all the non native tables have an instance of storage handler");
+           LockType lockType = storageHandler.getLockType(output);
+           if (null == LockType.findByValue(lockType.getValue())) {
+             throw new IllegalArgumentException(String
+                 .format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(),
+                     t.getTableName()));
+           }
+           compBuilder.setLock(lockType);
+         } else {
+           if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
+             compBuilder.setExclusive();
+           } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
+             compBuilder.setShared();
+           }
+         }
+         compBuilder.setOperationType(DataOperationType.INSERT);
+         break;
+       case DDL_SHARED:
+         compBuilder.setShared();
 -        compBuilder.setOperationType(DataOperationType.NO_TXN);
++        if (!output.isTxnAnalyze()) {
++          // Analyze needs txn components to be present, otherwise an aborted analyze write ID
++          // might be rolled under the watermark by compactor while stats written by it are
++          // still present.
++          compBuilder.setOperationType(DataOperationType.NO_TXN);
++        }
+         break;
+ 
+       case UPDATE:
+         compBuilder.setSemiShared();
+         compBuilder.setOperationType(DataOperationType.UPDATE);
+         break;
+       case DELETE:
+         compBuilder.setSemiShared();
+         compBuilder.setOperationType(DataOperationType.DELETE);
+         break;
+ 
+       case DDL_NO_LOCK:
+         continue; // No lock required here
+ 
+       default:
+         throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
+       }
+       if (t != null) {
+         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
+       }
+ 
+       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
+       LockComponent comp = compBuilder.build();
+       LOG.debug("Adding lock component to lock request " + comp.toString());
+       lockComponents.add(comp);
+     }
+     return lockComponents;
+   }
  }
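
The write-lock selection above is the same table that previously lived in DbTxnManager; the
only merge-specific change is the DDL_SHARED branch (the ++ lines), which keeps the txn
operation type for ANALYZE so an aborted analyze write ID is not rolled under the watermark
by the compactor while its stats are still present. The mapping boils down to a small
decision table; here is a self-contained sketch using illustrative enums (not the Hive
types), eliding the storage-handler and strict-locking branches of INSERT:

    // Illustrative only; the authoritative logic is AcidUtils.makeLockComponents above.
    class LockModeSketch {
      enum WriteType { DDL_EXCLUSIVE, INSERT_OVERWRITE, INSERT, DDL_SHARED, UPDATE, DELETE }
      enum LockMode { EXCLUSIVE, SEMI_SHARED, SHARED }

      static LockMode lockModeFor(WriteType type, boolean transactional, boolean overwriteXLock) {
        switch (type) {
        case DDL_EXCLUSIVE:
          return LockMode.EXCLUSIVE;
        case INSERT_OVERWRITE:
          // A transactional target downgrades to semi-shared when TXN_OVERWRITE_X_LOCK is off;
          // a non-transactional overwrite always takes an exclusive lock.
          return (transactional && !overwriteXLock) ? LockMode.SEMI_SHARED : LockMode.EXCLUSIVE;
        case UPDATE:
        case DELETE:
          return LockMode.SEMI_SHARED;
        case INSERT:      // non-native and strict-locking-mode branches elided
        case DDL_SHARED:
          return LockMode.SHARED;
        default:
          throw new IllegalArgumentException("Unknown write type " + type);
        }
      }
    }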

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index d3eefb9,06067a2..27abaf5
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@@ -24,12 -22,9 +24,10 @@@ import org.apache.hadoop.conf.Configura
  import org.apache.hadoop.hive.common.JavaUtils;
  import org.apache.hadoop.hive.common.ValidTxnList;
  import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 +import org.apache.hadoop.hive.common.ValidWriteIdList;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
- import org.apache.hadoop.hive.metastore.LockComponentBuilder;
  import org.apache.hadoop.hive.metastore.LockRequestBuilder;
- import org.apache.hadoop.hive.metastore.api.DataOperationType;
  import org.apache.hadoop.hive.metastore.api.LockComponent;
  import org.apache.hadoop.hive.metastore.api.LockResponse;
  import org.apache.hadoop.hive.metastore.api.LockState;
@@@ -432,182 -398,15 +402,16 @@@ public final class DbTxnManager extend
      rqstBuilder.setTransactionId(txnId)
          .setUser(username);
  
-     // For each source to read, get a shared lock
-     for (ReadEntity input : plan.getInputs()) {
-       if (!input.needsLock() || input.isUpdateOrDelete() || !needsLock(input)) {
-         // We don't want to acquire read locks during update or delete as we'll be acquiring write
-         // locks instead. Also, there's no need to lock temp tables since they're session wide
-         continue;
-       }
-       LockComponentBuilder compBuilder = new LockComponentBuilder();
-       compBuilder.setShared();
-       compBuilder.setOperationType(DataOperationType.SELECT);
- 
-       Table t = null;
-       switch (input.getType()) {
-         case DATABASE:
-           compBuilder.setDbName(input.getDatabase().getName());
-           break;
- 
-         case TABLE:
-           t = input.getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         case PARTITION:
-         case DUMMYPARTITION:
-           compBuilder.setPartitionName(input.getPartition().getName());
-           t = input.getPartition().getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         default:
-           // This is a file or something we don't hold locks for.
-           continue;
-       }
-       if(t != null) {
-         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
-       }
-       LockComponent comp = compBuilder.build();
-       LOG.debug("Adding lock component to lock request " + comp.toString());
-       rqstBuilder.addLockComponent(comp);
-       atLeastOneLock = true;
-     }
- 
-     // For each source to write to, get the appropriate lock type.  If it's
-     // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
-     // overwrite) than we need a shared.  If it's update or delete then we
-     // need a SEMI-SHARED.
-     for (WriteEntity output : plan.getOutputs()) {
-       LOG.debug("output is null " + (output == null));
-       if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR ||
-           !needsLock(output)) {
-         // We don't lock files or directories. We also skip locking temp tables.
-         continue;
-       }
-       LockComponentBuilder compBuilder = new LockComponentBuilder();
-       Table t = null;
-       switch (output.getType()) {
-         case DATABASE:
-           compBuilder.setDbName(output.getDatabase().getName());
-           break;
- 
-         case TABLE:
-         case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
-           t = output.getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         case PARTITION:
-           compBuilder.setPartitionName(output.getPartition().getName());
-           t = output.getPartition().getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         default:
-           // This is a file or something we don't hold locks for.
-           continue;
-       }
-       switch (output.getWriteType()) {
-         /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
-          Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
-          makes sense everywhere).  This however would be problematic for merge...*/
-       case DDL_EXCLUSIVE:
-         compBuilder.setExclusive();
-         compBuilder.setOperationType(DataOperationType.NO_TXN);
-         break;
-       case INSERT_OVERWRITE:
-         t = getTable(output);
-         if (AcidUtils.isTransactionalTable(t)) {
-           if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
-             compBuilder.setExclusive();
-           } else {
-             compBuilder.setSemiShared();
-           }
-           compBuilder.setOperationType(DataOperationType.UPDATE);
-         } else {
-           compBuilder.setExclusive();
-           compBuilder.setOperationType(DataOperationType.NO_TXN);
-         }
-         break;
-       case INSERT:
-         assert t != null;
-         if (AcidUtils.isTransactionalTable(t)) {
-           compBuilder.setShared();
-         } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
-           final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
-               "Thought all the non native tables have an instance of storage handler"
-           );
-           LockType lockType = storageHandler.getLockType(output);
-           switch (lockType) {
-           case EXCLUSIVE:
-             compBuilder.setExclusive();
-             break;
-           case SHARED_READ:
-             compBuilder.setShared();
-             break;
-           case SHARED_WRITE:
-             compBuilder.setSemiShared();
-             break;
-           default:
-             throw new IllegalArgumentException(String
-                 .format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(),
-                     t.getTableName()
-                 ));
-           }
 +
-         } else {
-           if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
-             compBuilder.setExclusive();
-           } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
-             compBuilder.setShared();
-           }
-         }
-         compBuilder.setOperationType(DataOperationType.INSERT);
-         break;
-       case DDL_SHARED:
-         compBuilder.setShared();
-         if (!output.isTxnAnalyze()) {
-           // Analyze needs txn components to be present, otherwise an aborted analyze write ID
-           // might be rolled under the watermark by compactor while stats written by it are
-           // still present.
-           compBuilder.setOperationType(DataOperationType.NO_TXN);
-         }
-         break;
- 
-       case UPDATE:
-         compBuilder.setSemiShared();
-         compBuilder.setOperationType(DataOperationType.UPDATE);
-         break;
-       case DELETE:
-         compBuilder.setSemiShared();
-         compBuilder.setOperationType(DataOperationType.DELETE);
-         break;
- 
-       case DDL_NO_LOCK:
-         continue; // No lock required here
- 
-       default:
-         throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
-       }
-       if (t != null) {
-         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
-       }
- 
-       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
-       LockComponent comp = compBuilder.build();
-       LOG.debug("Adding lock component to lock request " + comp.toString());
-       rqstBuilder.addLockComponent(comp);
-       atLeastOneLock = true;
-     }
-     //plan
      // Make sure we need locks.  It's possible there's nothing to lock in
      // this operation.
-     if (!atLeastOneLock) {
+     if(plan.getInputs().isEmpty() && plan.getOutputs().isEmpty()) {
+       LOG.debug("No locks needed for queryId" + queryId);
+       return null;
+     }
+     List<LockComponent> lockComponents = AcidUtils.makeLockComponents(plan.getOutputs(), plan.getInputs(), conf);
+     //It's possible there's nothing to lock even if we have w/r entities.
+     if(lockComponents.isEmpty()) {
        LOG.debug("No locks needed for queryId" + queryId);
        return null;
      }
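
The net effect of this hunk: the per-entity lock-building loops move out of acquireLocks
into AcidUtils.makeLockComponents, and the atLeastOneLock flag is replaced by two early
exits. A condensed sketch of the resulting call pattern, pieced together from the lines
above (the LockRequestBuilder construction is assumed from surrounding code; error
handling and lock-wait logic are omitted):

    // Condensed from the post-merge DbTxnManager.acquireLocks path.
    LockRequestBuilder rqstBuilder = new LockRequestBuilder(queryId);
    rqstBuilder.setTransactionId(txnId).setUser(username);
    if (plan.getInputs().isEmpty() && plan.getOutputs().isEmpty()) {
      return null; // nothing to read or write, so nothing to lock
    }
    List<LockComponent> lockComponents =
        AcidUtils.makeLockComponents(plan.getOutputs(), plan.getInputs(), conf);
    if (lockComponents.isEmpty()) {
      return null; // entities exist, but none need locks (temp tables, DDL_NO_LOCK, ...)
    }
    lockComponents.forEach(rqstBuilder::addLockComponent);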

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index 0000000,e5c8ef7..aca5227
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@@ -1,0 -1,6 +1,8 @@@
+ -- Upgrade MetaStore schema from 3.2.0 to 4.0.0
 -
++-- HIVE-19416
++ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0;
++ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0;
+ 
+ -- This needs to be the last thing done.  Insert any changes above this line.
+ UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
index 0000000,a8aad87..1d8fc55
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index 0000000,cb2e985..79e72ab
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0' AS ' ';
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS ' ';
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
index 0000000,89d37f4..aa20a49
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
@@@ -1,0 -1,6 +1,9 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
+ 
++ALTER TABLE TBLS ADD WRITE_ID number NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID number NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index 0000000,08a1341..f7232a1
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0';
+ 
++-- HIVE-19416
++ALTER TABLE "TBLS" ADD "WRITE_ID" bigint;
++ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';
+
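
All five upgrade scripts in this commit make the same HIVE-19416 change: a WRITE_ID column
(bigint, or number on Oracle; Derby adds DEFAULT 0) on TBLS and PARTITIONS, ahead of the
VERSION bump. A hedged way to spot-check that the columns landed after running a script:
plain JDBC against the metastore backing database, with a placeholder Derby URL you would
swap for your own connection string:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class CheckWriteIdColumns {
      public static void main(String[] args) throws SQLException {
        // Placeholder connection string; point at the real metastore DB and credentials.
        try (Connection conn = DriverManager.getConnection("jdbc:derby:metastore_db")) {
          DatabaseMetaData md = conn.getMetaData();
          for (String table : new String[] {"TBLS", "PARTITIONS"}) {
            // getColumns returns a row iff the named column exists on the table.
            try (ResultSet rs = md.getColumns(null, null, table, "WRITE_ID")) {
              System.out.println(table + ".WRITE_ID present: " + rs.next());
            }
          }
        }
      }
    }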