Posted to commits@hive.apache.org by ek...@apache.org on 2018/08/04 00:44:13 UTC

hive git commit: HIVE-17683: Add explain locks command (Igor Kryvenko via Eugene Koifman)

Repository: hive
Updated Branches:
  refs/heads/branch-3 9f9b1e9b2 -> 96e7c4726


HIVE-17683: Add explain locks <sql> command (Igor Kryvenko via Eugene Koifman)
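
This adds an "explain locks <sql>" form of EXPLAIN that shows the lock components a statement would request from the transaction/lock manager, without actually acquiring any locks. A minimal usage sketch, assuming the same ACID setup used by the new explain_locks.q test below (DbTxnManager with concurrency enabled); the statement itself is illustrative:

  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

  -- plain text output: one "<db>.<table>[.<partition>] -> <lock type>" line per lock
  explain locks update target set b = 1 where p = 2;

  -- formatted output: a JSON object keyed by "LOCK INFORMATION:"
  explain formatted locks update target set b = 1 where p = 2;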


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/96e7c472
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/96e7c472
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/96e7c472

Branch: refs/heads/branch-3
Commit: 96e7c4726778998c4c4ed31a21a7d7134f941b73
Parents: 9f9b1e9
Author: Igor Kryvenko <kr...@gmail.com>
Authored: Fri Aug 3 17:18:41 2018 -0700
Committer: Eugene Koifman <ek...@apache.org>
Committed: Fri Aug 3 17:18:41 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |  46 +++++
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 207 +++++++++++++++++++
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    | 176 +---------------
 .../apache/hadoop/hive/ql/metadata/Table.java   |   4 +
 .../hive/ql/parse/ExplainConfiguration.java     |   8 +
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |   2 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   1 +
 .../hadoop/hive/ql/plan/ExplainLockDesc.java    | 116 +++++++++++
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |  17 ++
 .../test/queries/clientpositive/explain_locks.q |  22 ++
 .../results/clientpositive/explain_locks.q.out  |  91 ++++++++
 .../hive/metastore/LockComponentBuilder.java    |   6 +
 .../hive/metastore/LockRequestBuilder.java      |  17 ++
 13 files changed, 544 insertions(+), 169 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index cbdeb33..5fa5e9e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -45,9 +45,11 @@ import org.apache.hadoop.hive.common.jsonexplain.JsonParserFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -55,6 +57,7 @@ import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailL
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
+import org.apache.hadoop.hive.ql.plan.ExplainLockDesc;
 import org.apache.hadoop.hive.ql.plan.ExplainWork;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
@@ -296,6 +299,44 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
     return jsonOutput ? outJSONObject : null;
   }
 
+  private JSONObject getLocks(PrintStream out, ExplainWork work) {
+
+    JSONObject jsonObject = new JSONObject(new LinkedHashMap<>());
+
+    boolean jsonOutput = work.isFormatted();
+    if (jsonOutput) {
+      out = null;
+    }
+    if (work.getParseContext() != null) {
+      List<LockComponent> lockComponents = AcidUtils.makeLockComponents(work.getOutputs(), work.getInputs(), conf);
+      if (null != out) {
+        out.print("LOCK INFORMATION:\n");
+      }
+      List<ExplainLockDesc> locks = new ArrayList<>(lockComponents.size());
+
+      for (LockComponent component : lockComponents) {
+        ExplainLockDesc lockDesc = new ExplainLockDesc(component);
+
+        if (null != out) {
+          out.print(lockDesc.getFullName());
+          out.print(" -> ");
+          out.print(lockDesc.getLockType());
+          out.print('\n');
+        } else {
+          locks.add(lockDesc);
+        }
+
+      }
+
+      if (jsonOutput) {
+        jsonObject.put("LOCK INFORMATION:", locks);
+      }
+    } else {
+      System.err.println("No parse context!");
+    }
+    return jsonObject;
+  }
+
   private List<String> toString(Collection<?> objects) {
     List<String> list = new ArrayList<String>();
     for (Object object : objects) {
@@ -353,6 +394,11 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       } else if (work.getDependency()) {
         JSONObject jsonDependencies = getJSONDependencies(work);
         out.print(jsonDependencies);
+      }  else if (work.isLocks()) {
+        JSONObject jsonLocks = getLocks(out, work);
+        if (work.isFormatted()) {
+          out.print(jsonLocks);
+        }
       } else {
         if (work.isUserLevelExplain()) {
           // Because of the implementation of the JsonParserFactory, we are sure

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 16ba82e..0257801 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -32,6 +32,7 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -45,17 +46,25 @@ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockType;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.hooks.Entity;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils.ParsedDelta;
 import org.apache.hadoop.hive.ql.io.orc.OrcFile;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
 import org.apache.hadoop.hive.ql.io.orc.Reader;
 import org.apache.hadoop.hive.ql.io.orc.Writer;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
@@ -1984,4 +1993,202 @@ public class AcidUtils {
     tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "false");
     tblProps.remove(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
   }
+
+  private static boolean needsLock(Entity entity) {
+    switch (entity.getType()) {
+      case TABLE:
+        return isLockableTable(entity.getTable());
+      case PARTITION:
+        return isLockableTable(entity.getPartition().getTable());
+      default:
+        return true;
+    }
+  }
+
+  private static Table getTable(WriteEntity we) {
+    Table t = we.getTable();
+    if (t == null) {
+      throw new IllegalStateException("No table info for " + we);
+    }
+    return t;
+  }
+
+  private static boolean isLockableTable(Table t) {
+    if (t.isTemporary()) {
+      return false;
+    }
+    switch (t.getTableType()) {
+      case MANAGED_TABLE:
+      case MATERIALIZED_VIEW:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  /**
+   * Create lock components from write/read entities.
+   * @param outputs write entities
+   * @param inputs read entities
+   * @param conf
+   * @return list with lock components
+   */
+  public static List<LockComponent> makeLockComponents(Set<WriteEntity> outputs, Set<ReadEntity> inputs,
+                                                       HiveConf conf) {
+    List<LockComponent> lockComponents = new ArrayList<>();
+    // For each source to read, get a shared lock
+    for (ReadEntity input : inputs) {
+      if (!input.needsLock() || input.isUpdateOrDelete() || !AcidUtils.needsLock(input)) {
+        // We don't want to acquire read locks during update or delete as we'll be acquiring write
+        // locks instead. Also, there's no need to lock temp tables since they're session wide
+        continue;
+      }
+      LockComponentBuilder compBuilder = new LockComponentBuilder();
+      compBuilder.setShared();
+      compBuilder.setOperationType(DataOperationType.SELECT);
+
+      Table t = null;
+      switch (input.getType()) {
+        case DATABASE:
+          compBuilder.setDbName(input.getDatabase().getName());
+          break;
+
+        case TABLE:
+          t = input.getTable();
+          compBuilder.setDbName(t.getDbName());
+          compBuilder.setTableName(t.getTableName());
+          break;
+
+        case PARTITION:
+        case DUMMYPARTITION:
+          compBuilder.setPartitionName(input.getPartition().getName());
+          t = input.getPartition().getTable();
+          compBuilder.setDbName(t.getDbName());
+          compBuilder.setTableName(t.getTableName());
+          break;
+
+        default:
+          // This is a file or something we don't hold locks for.
+          continue;
+      }
+      if (t != null) {
+        compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
+      }
+      LockComponent comp = compBuilder.build();
+      LOG.debug("Adding lock component to lock request " + comp.toString());
+      lockComponents.add(comp);
+    }
+    // For each source to write to, get the appropriate lock type.  If it's
+    // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
+    // overwrite) than we need a shared.  If it's update or delete then we
+    // need a SEMI-SHARED.
+    for (WriteEntity output : outputs) {
+      LOG.debug("output is null " + (output == null));
+      if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils
+              .needsLock(output)) {
+        // We don't lock files or directories. We also skip locking temp tables.
+        continue;
+      }
+      LockComponentBuilder compBuilder = new LockComponentBuilder();
+      Table t = null;
+      switch (output.getType()) {
+        case DATABASE:
+          compBuilder.setDbName(output.getDatabase().getName());
+          break;
+
+        case TABLE:
+        case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
+          t = output.getTable();
+          compBuilder.setDbName(t.getDbName());
+          compBuilder.setTableName(t.getTableName());
+          break;
+
+        case PARTITION:
+          compBuilder.setPartitionName(output.getPartition().getName());
+          t = output.getPartition().getTable();
+          compBuilder.setDbName(t.getDbName());
+          compBuilder.setTableName(t.getTableName());
+          break;
+
+        default:
+          // This is a file or something we don't hold locks for.
+          continue;
+      }
+      switch (output.getWriteType()) {
+        /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
+         Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
+         makes sense everywhere).  This however would be problematic for merge...*/
+        case DDL_EXCLUSIVE:
+          compBuilder.setExclusive();
+          compBuilder.setOperationType(DataOperationType.NO_TXN);
+          break;
+        case INSERT_OVERWRITE:
+          t = AcidUtils.getTable(output);
+          if (AcidUtils.isTransactionalTable(t)) {
+            if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
+              compBuilder.setExclusive();
+            } else {
+              compBuilder.setSemiShared();
+            }
+            compBuilder.setOperationType(DataOperationType.UPDATE);
+          } else {
+            compBuilder.setExclusive();
+            compBuilder.setOperationType(DataOperationType.NO_TXN);
+          }
+          break;
+        case INSERT:
+          assert t != null;
+          if (AcidUtils.isTransactionalTable(t)) {
+            compBuilder.setShared();
+          } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
+            final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
+                    "Thought all the non native tables have an instance of storage handler"
+            );
+            LockType lockType = storageHandler.getLockType(output);
+            if (null == LockType.findByValue(lockType.getValue())) {
+              throw new IllegalArgumentException(String
+                      .format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(),
+                              t.getTableName()));
+            }
+            compBuilder.setLock(lockType);
+          } else {
+              if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
+                  compBuilder.setExclusive();
+              } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
+                  compBuilder.setShared();
+              }
+          }
+          compBuilder.setOperationType(DataOperationType.INSERT);
+          break;
+        case DDL_SHARED:
+          compBuilder.setShared();
+          compBuilder.setOperationType(DataOperationType.NO_TXN);
+          break;
+
+        case UPDATE:
+          compBuilder.setSemiShared();
+          compBuilder.setOperationType(DataOperationType.UPDATE);
+          break;
+        case DELETE:
+          compBuilder.setSemiShared();
+          compBuilder.setOperationType(DataOperationType.DELETE);
+          break;
+
+        case DDL_NO_LOCK:
+          continue; // No lock required here
+
+        default:
+          throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
+      }
+      if (t != null) {
+        compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
+      }
+
+      compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
+      LockComponent comp = compBuilder.build();
+      LOG.debug("Adding lock component to lock request " + comp.toString());
+      lockComponents.add(comp);
+    }
+    return lockComponents;
+  }
 }
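
The write-side mapping implemented in makeLockComponents above (exclusive for INSERT OVERWRITE by default, shared for a plain INSERT into a transactional table, semi-shared for UPDATE and DELETE) can be observed directly with the new command. A rough sketch against the transactional tables created in explain_locks.q below; the exact lock types also depend on TXN_OVERWRITE_X_LOCK and HIVE_TXN_STRICT_LOCKING_MODE:

  explain locks insert into target partition(p,q) values (1,2,1,2);   -- expect SHARED_READ on default.target
  explain locks insert overwrite table target partition(p=1, q=2)
    select a1, b1 from source;                                        -- expect EXCLUSIVE on the target partition by default
  explain locks delete from target where p = 2;                       -- expect SHARED_WRITE on the affected partitions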

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 9bc4d2a..0c4ca53 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -429,180 +429,18 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     rqstBuilder.setTransactionId(txnId)
         .setUser(username);
 
-    // For each source to read, get a shared lock
-    for (ReadEntity input : plan.getInputs()) {
-      if (!input.needsLock() || input.isUpdateOrDelete() || !needsLock(input)) {
-        // We don't want to acquire read locks during update or delete as we'll be acquiring write
-        // locks instead. Also, there's no need to lock temp tables since they're session wide
-        continue;
-      }
-      LockComponentBuilder compBuilder = new LockComponentBuilder();
-      compBuilder.setShared();
-      compBuilder.setOperationType(DataOperationType.SELECT);
-
-      Table t = null;
-      switch (input.getType()) {
-        case DATABASE:
-          compBuilder.setDbName(input.getDatabase().getName());
-          break;
-
-        case TABLE:
-          t = input.getTable();
-          compBuilder.setDbName(t.getDbName());
-          compBuilder.setTableName(t.getTableName());
-          break;
-
-        case PARTITION:
-        case DUMMYPARTITION:
-          compBuilder.setPartitionName(input.getPartition().getName());
-          t = input.getPartition().getTable();
-          compBuilder.setDbName(t.getDbName());
-          compBuilder.setTableName(t.getTableName());
-          break;
-
-        default:
-          // This is a file or something we don't hold locks for.
-          continue;
-      }
-      if(t != null) {
-        compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
-      }
-      LockComponent comp = compBuilder.build();
-      LOG.debug("Adding lock component to lock request " + comp.toString());
-      rqstBuilder.addLockComponent(comp);
-      atLeastOneLock = true;
+    if(plan.getInputs().isEmpty() && plan.getOutputs().isEmpty()) {
+      LOG.debug("No locks needed for queryId" + queryId);
+      return null;
     }
 
-    // For each source to write to, get the appropriate lock type.  If it's
-    // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
-    // overwrite) than we need a shared.  If it's update or delete then we
-    // need a SEMI-SHARED.
-    for (WriteEntity output : plan.getOutputs()) {
-      LOG.debug("output is null " + (output == null));
-      if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR ||
-          !needsLock(output)) {
-        // We don't lock files or directories. We also skip locking temp tables.
-        continue;
-      }
-      LockComponentBuilder compBuilder = new LockComponentBuilder();
-      Table t = null;
-      switch (output.getType()) {
-        case DATABASE:
-          compBuilder.setDbName(output.getDatabase().getName());
-          break;
-
-        case TABLE:
-        case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
-          t = output.getTable();
-          compBuilder.setDbName(t.getDbName());
-          compBuilder.setTableName(t.getTableName());
-          break;
-
-        case PARTITION:
-          compBuilder.setPartitionName(output.getPartition().getName());
-          t = output.getPartition().getTable();
-          compBuilder.setDbName(t.getDbName());
-          compBuilder.setTableName(t.getTableName());
-          break;
-
-        default:
-          // This is a file or something we don't hold locks for.
-          continue;
-      }
-      switch (output.getWriteType()) {
-        /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
-         Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
-         makes sense everywhere).  This however would be problematic for merge...*/
-      case DDL_EXCLUSIVE:
-        compBuilder.setExclusive();
-        compBuilder.setOperationType(DataOperationType.NO_TXN);
-        break;
-      case INSERT_OVERWRITE:
-        t = getTable(output);
-        if (AcidUtils.isTransactionalTable(t)) {
-          if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
-            compBuilder.setExclusive();
-          } else {
-            compBuilder.setSemiShared();
-          }
-          compBuilder.setOperationType(DataOperationType.UPDATE);
-        } else {
-          compBuilder.setExclusive();
-          compBuilder.setOperationType(DataOperationType.NO_TXN);
-        }
-        break;
-      case INSERT:
-        assert t != null;
-        if (AcidUtils.isTransactionalTable(t)) {
-          compBuilder.setShared();
-        } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
-          final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
-              "Thought all the non native tables have an instance of storage handler"
-          );
-          LockType lockType = storageHandler.getLockType(output);
-          switch (lockType) {
-          case EXCLUSIVE:
-            compBuilder.setExclusive();
-            break;
-          case SHARED_READ:
-            compBuilder.setShared();
-            break;
-          case SHARED_WRITE:
-            compBuilder.setSemiShared();
-            break;
-          default:
-            throw new IllegalArgumentException(String
-                .format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(),
-                    t.getTableName()
-                ));
-          }
-
-        } else {
-          if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
-            compBuilder.setExclusive();
-          } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
-            compBuilder.setShared();
-          }
-        }
-        compBuilder.setOperationType(DataOperationType.INSERT);
-        break;
-      case DDL_SHARED:
-        compBuilder.setShared();
-        compBuilder.setOperationType(DataOperationType.NO_TXN);
-        break;
-
-      case UPDATE:
-        compBuilder.setSemiShared();
-        compBuilder.setOperationType(DataOperationType.UPDATE);
-        break;
-      case DELETE:
-        compBuilder.setSemiShared();
-        compBuilder.setOperationType(DataOperationType.DELETE);
-        break;
-
-      case DDL_NO_LOCK:
-        continue; // No lock required here
-
-      default:
-        throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
-      }
-      if (t != null) {
-        compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
-      }
-
-      compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
-      LockComponent comp = compBuilder.build();
-      LOG.debug("Adding lock component to lock request " + comp.toString());
-      rqstBuilder.addLockComponent(comp);
-      atLeastOneLock = true;
-    }
-    //plan
-    // Make sure we need locks.  It's possible there's nothing to lock in
-    // this operation.
-    if (!atLeastOneLock) {
+    List<LockComponent> lockComponents = AcidUtils.makeLockComponents(plan.getOutputs(), plan.getInputs(), conf);
+    //It's possible there's nothing to lock even if we have w/r entities.
+    if(lockComponents.isEmpty()) {
       LOG.debug("No locks needed for queryId" + queryId);
       return null;
     }
+    rqstBuilder.addLockComponents(lockComponents);
 
     List<HiveLock> locks = new ArrayList<HiveLock>(1);
     LockState lockState = lockMgr.lock(rqstBuilder.build(), queryId, isBlocking, locks);

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 14e60f0..03b0269 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -1087,4 +1087,8 @@ public class Table implements Serializable {
   public boolean hasDeserializer() {
     return deserializer != null;
   }
+
+  public String getCatalogName() {
+    return this.tTable.getCatName();
+  }
 };

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
index 105ef08..5ca6b59 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
@@ -47,6 +47,7 @@ public class ExplainConfiguration {
   private boolean vectorization = false;
   private boolean vectorizationOnly = false;
   private VectorizationDetailLevel vectorizationDetailLevel = VectorizationDetailLevel.SUMMARY;
+  private boolean locks = false;
 
   private Path explainRootPath;
   private Map<String, Long> opIdToRuntimeNumRows;
@@ -153,4 +154,11 @@ public class ExplainConfiguration {
     this.opIdToRuntimeNumRows = opIdToRuntimeNumRows;
   }
 
+  public boolean isLocks() {
+    return locks;
+  }
+
+  public void setLocks(boolean locks) {
+    this.locks = locks;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index 2a1de44..5594faf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -113,6 +113,8 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
             i++;
           }
         }
+      } else if (explainOptions == HiveParser.KW_LOCKS) {
+        config.setLocks(true);
       } else {
         // UNDONE: UNKNOWN OPTION?
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index e49b499..a23628e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -795,6 +795,7 @@ explainOption
     | KW_AUTHORIZATION
     | KW_ANALYZE
     | KW_REOPTIMIZATION
+    | KW_LOCKS
     | (KW_VECTORIZATION vectorizationOnly? vectorizatonDetail?)
     ;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainLockDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainLockDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainLockDesc.java
new file mode 100644
index 0000000..1354b8f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainLockDesc.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+import java.util.ArrayList;
+
+/**
+ * ExplainLockDesc represents lock entity in query plan.
+ */
+public class ExplainLockDesc {
+    private String catalogName;
+    private String dbName;
+    private String tableName;
+    private String partitionName;
+    private LockType lockType;
+
+    public ExplainLockDesc() {
+    }
+
+    public ExplainLockDesc(LockComponent component) {
+        this.dbName = component.getDbname();
+        if (null != component.getTablename()) {
+            this.tableName = component.getTablename();
+        }
+        if (null != component.getPartitionname()) {
+            this.partitionName = component.getPartitionname();
+        }
+        this.lockType = component.getType();
+    }
+
+    public String getCatalogName() {
+        return catalogName;
+    }
+
+    public ExplainLockDesc setCatalogName(String catalogName) {
+        this.catalogName = catalogName;
+        return this;
+    }
+
+    public String getDbName() {
+        return dbName;
+    }
+
+    public ExplainLockDesc setDbName(String dbName) {
+        this.dbName = dbName;
+        return this;
+    }
+
+    public String getTableName() {
+        return tableName;
+    }
+
+    public ExplainLockDesc setTableName(String tableName) {
+        this.tableName = tableName;
+        return this;
+    }
+
+    public String getPartitionName() {
+        return partitionName;
+    }
+
+    public ExplainLockDesc setPartitionName(String partitionName) {
+        this.partitionName = partitionName;
+        return this;
+    }
+
+    public LockType getLockType() {
+        return lockType;
+    }
+
+    public ExplainLockDesc setLockType(LockType lockType) {
+        this.lockType = lockType;
+        return this;
+    }
+
+    public String getFullName() {
+        ArrayList<String> list = new ArrayList<String>();
+        if (null != catalogName) {
+            list.add(catalogName);
+        }
+        if (null != dbName) {
+            list.add(dbName);
+        }
+        if (null != tableName) {
+            list.add(tableName);
+        }
+        if (null != partitionName) {
+            list.add(partitionName);
+        }
+        return StringUtils.join(list, '.');
+    }
+
+    @Override public String toString() {
+        return getFullName() + " -> " + this.getLockType();
+    }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
index 3a42a68..a25c435 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailLevel;
@@ -42,6 +43,7 @@ public class ExplainWork implements Serializable {
   private ArrayList<Task<? extends Serializable>> rootTasks;
   private Task<? extends Serializable> fetchTask;
   private HashSet<ReadEntity> inputs;
+  private HashSet<WriteEntity> outputs;
   private ParseContext pCtx;
 
   private ExplainConfiguration config;
@@ -72,6 +74,9 @@ public class ExplainWork implements Serializable {
     if (analyzer != null) {
       this.inputs = analyzer.getInputs();
     }
+    if (analyzer != null) {
+      this.outputs = analyzer.getAllOutputs();
+    }
     this.pCtx = pCtx;
     this.cboInfo = cboInfo;
     this.optimizedSQL = optimizedSQL;
@@ -110,6 +115,14 @@ public class ExplainWork implements Serializable {
     this.inputs = inputs;
   }
 
+  public HashSet<WriteEntity> getOutputs() {
+    return outputs;
+  }
+
+  public void setOutputs(HashSet<WriteEntity> outputs) {
+    this.outputs = outputs;
+  }
+
   public boolean getExtended() {
     return config.isExtended();
   }
@@ -190,4 +203,8 @@ public class ExplainWork implements Serializable {
     this.config = config;
   }
 
+  public boolean isLocks() {
+    return config.isLocks();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/test/queries/clientpositive/explain_locks.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_locks.q b/ql/src/test/queries/clientpositive/explain_locks.q
new file mode 100644
index 0000000..a0e273f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/explain_locks.q
@@ -0,0 +1,22 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+explain locks drop table test_explain_locks;
+explain locks create table test_explain_locks (a int, b int);
+drop table if exists target;
+drop table if exists source;
+
+create table target (a int, b int) partitioned by (p int, q int) clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true');
+create table source (a1 int, b1 int, p1 int, q1 int) clustered by (a1) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true');
+insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2);
+
+-- the intent here is to record the set of ReadEntity and WriteEntity objects for these 2 update statements
+explain locks update target set b = 1 where p in (select t.q1 from source t where t.a1=5);
+
+explain locks update source set b1 = 1 where p1 in (select t.q from target t where t.p=2);
+
+explain formatted locks update source set b1 = 1 where p1 in (select t.q from target t where t.p=2);
+
+-- the extra predicates in when matched clause match 1 partition
+explain locks merge into target t using source s on t.a = s.a1 when matched and p = 1 and q = 2 then update set b = 1 when matched and p = 2 and q = 2 then delete when not matched and a1 > 100 then insert values(s.a1,s.b1,s.p1, s.q1);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/ql/src/test/results/clientpositive/explain_locks.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_locks.q.out b/ql/src/test/results/clientpositive/explain_locks.q.out
new file mode 100644
index 0000000..72aa410
--- /dev/null
+++ b/ql/src/test/results/clientpositive/explain_locks.q.out
@@ -0,0 +1,91 @@
+PREHOOK: query: explain locks drop table test_explain_locks
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: explain locks drop table test_explain_locks
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: explain locks create table test_explain_locks (a int, b int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: explain locks create table test_explain_locks (a int, b int)
+POSTHOOK: type: CREATETABLE
+LOCK INFORMATION:
+default -> SHARED_READ
+PREHOOK: query: drop table if exists target
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists target
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists source
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists source
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table target (a int, b int) partitioned by (p int, q int) clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@target
+POSTHOOK: query: create table target (a int, b int) partitioned by (p int, q int) clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@target
+PREHOOK: query: create table source (a1 int, b1 int, p1 int, q1 int) clustered by (a1) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@source
+POSTHOOK: query: create table source (a1 int, b1 int, p1 int, q1 int) clustered by (a1) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@source
+PREHOOK: query: insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@target
+POSTHOOK: query: insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@target@p=1/q=2
+POSTHOOK: Output: default@target@p=1/q=3
+POSTHOOK: Output: default@target@p=2/q=2
+POSTHOOK: Lineage: target PARTITION(p=1,q=2).a SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=1,q=2).b SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=1,q=3).a SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=1,q=3).b SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=2,q=2).a SCRIPT []
+POSTHOOK: Lineage: target PARTITION(p=2,q=2).b SCRIPT []
+PREHOOK: query: explain locks update target set b = 1 where p in (select t.q1 from source t where t.a1=5)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain locks update target set b = 1 where p in (select t.q1 from source t where t.a1=5)
+POSTHOOK: type: QUERY
+LOCK INFORMATION:
+default.source -> SHARED_READ
+default.target.p=1/q=2 -> SHARED_READ
+default.target.p=1/q=3 -> SHARED_READ
+default.target.p=2/q=2 -> SHARED_READ
+default.target.p=2/q=2 -> SHARED_WRITE
+default.target.p=1/q=3 -> SHARED_WRITE
+default.target.p=1/q=2 -> SHARED_WRITE
+PREHOOK: query: explain locks update source set b1 = 1 where p1 in (select t.q from target t where t.p=2)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain locks update source set b1 = 1 where p1 in (select t.q from target t where t.p=2)
+POSTHOOK: type: QUERY
+LOCK INFORMATION:
+default.target -> SHARED_READ
+default.target.p=2/q=2 -> SHARED_READ
+default.source -> SHARED_WRITE
+PREHOOK: query: explain formatted locks update source set b1 = 1 where p1 in (select t.q from target t where t.p=2)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain formatted locks update source set b1 = 1 where p1 in (select t.q from target t where t.p=2)
+POSTHOOK: type: QUERY
+{"LOCK INFORMATION:":"[default.target -> SHARED_READ, default.target.p=2/q=2 -> SHARED_READ, default.source -> SHARED_WRITE]"}
+PREHOOK: query: explain locks merge into target t using source s on t.a = s.a1 when matched and p = 1 and q = 2 then update set b = 1 when matched and p = 2 and q = 2 then delete when not matched and a1 > 100 then insert values(s.a1,s.b1,s.p1, s.q1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain locks merge into target t using source s on t.a = s.a1 when matched and p = 1 and q = 2 then update set b = 1 when matched and p = 2 and q = 2 then delete when not matched and a1 > 100 then insert values(s.a1,s.b1,s.p1, s.q1)
+POSTHOOK: type: QUERY
+LOCK INFORMATION:
+default.source -> SHARED_READ
+default.target.p=1/q=2 -> SHARED_READ
+default.target.p=1/q=3 -> SHARED_READ
+default.target.p=2/q=2 -> SHARED_READ
+default.target.p=2/q=2 -> SHARED_WRITE
+default.target.p=2/q=2 -> SHARED_WRITE
+default.target.p=1/q=3 -> SHARED_WRITE
+default.target.p=1/q=3 -> SHARED_WRITE
+default.target.p=1/q=2 -> SHARED_WRITE
+default.target.p=1/q=2 -> SHARED_WRITE
+default.target -> SHARED_READ

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
index 1ad0638..8b8e887 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
@@ -118,4 +118,10 @@ public class LockComponentBuilder {
     component.setLevel(level);
     return component;
   }
+
+
+  public LockComponent setLock(LockType type) {
+    component.setType(type);
+    return component;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/96e7c472/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
index d03c73a..22902a9 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.api.LockType;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
+import java.util.Collection;
 import java.util.LinkedHashMap;
 import java.util.Map;
 
@@ -94,6 +95,16 @@ public class LockRequestBuilder {
     return this;
   }
 
+  /**
+   * Add a collection with lock components to the lock request
+   * @param components to add
+   * @return reference to this builder
+   */
+  public LockRequestBuilder addLockComponents(Collection<LockComponent> components) {
+    trie.addAll(components);
+    return this;
+  }
+
   // For reasons that are completely incomprehensible to me the semantic
   // analyzers often ask for multiple locks on the same entity (for example
   // a shared_read and an exlcusive lock).  The db locking system gets confused
@@ -120,6 +131,12 @@ public class LockRequestBuilder {
       setTable(comp, tabs);
     }
 
+    public void addAll(Collection<LockComponent> components) {
+      for(LockComponent component: components) {
+        add(component);
+      }
+    }
+
     public void addLocksToRequest(LockRequest request) {
       for (TableTrie tab : trie.values()) {
         for (PartTrie part : tab.values()) {