Posted to commits@hive.apache.org by ha...@apache.org on 2014/03/28 17:11:25 UTC

svn commit: r1582792 - in /hive/trunk/ql/src: java/org/apache/hadoop/hive/ql/exec/ java/org/apache/hadoop/hive/ql/hooks/ java/org/apache/hadoop/hive/ql/lockmgr/ java/org/apache/hadoop/hive/ql/parse/ java/org/apache/hadoop/hive/ql/parse/authorization/ t...

Author: hashutosh
Date: Fri Mar 28 16:11:25 2014
New Revision: 1582792

URL: http://svn.apache.org/r1582792
Log:
HIVE-6734 : DDL locking too coarse-grained in new db txn manager (Alan Gates via Ashutosh Chauhan)
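
The old scheme offered only DDL (always treated as exclusive) and DDL_METADATA_ONLY (treated as shared); there was no way for an analyzer to say that no lock is needed at all, for instance when a table lock already covers its partitions. The patch replaces the pair with three explicit levels. A condensed sketch of the registration pattern used throughout the diff below, with tbl and outputs as at the changed call sites:

    // Before: one catch-all type, always locked exclusively.
    outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));

    // After: the analyzer requests the narrowest lock that is still safe.
    outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_EXCLUSIVE)); // e.g. DROP TABLE
    outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_SHARED));    // e.g. ADD PARTITION
    outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));   // already covered by another lock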

Added:
    hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
    hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out
Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Fri Mar 28 16:11:25 2014
@@ -1116,7 +1116,7 @@ public class DDLTask extends Task<DDLWor
             MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(),
                 crtIndex.getTableName(), crtIndex.getIndexName());
           Table indexTable = db.getTable(indexTableName);
-          work.getOutputs().add(new WriteEntity(indexTable, WriteEntity.WriteType.DDL));
+          work.getOutputs().add(new WriteEntity(indexTable, WriteEntity.WriteType.DDL_NO_LOCK));
     }
     return 0;
   }
@@ -1235,7 +1235,8 @@ public class DDLTask extends Task<DDLWor
     Partition newPart = db
         .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false);
     work.getInputs().add(new ReadEntity(oldPart));
-    work.getOutputs().add(new WriteEntity(newPart, WriteEntity.WriteType.DDL));
+    // We've already obtained a lock on the table, so don't lock the partition too
+    work.getOutputs().add(new WriteEntity(newPart, WriteEntity.WriteType.DDL_NO_LOCK));
     return 0;
   }
 
@@ -1277,7 +1278,8 @@ public class DDLTask extends Task<DDLWor
     }
 
     work.getInputs().add(new ReadEntity(tbl));
-    work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));
+    // We've already locked the table as the input, so don't relock it as the output.
+    work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
 
     return 0;
   }
@@ -1306,7 +1308,7 @@ public class DDLTask extends Task<DDLWor
         throw new HiveException("Uable to update table");
       }
       work.getInputs().add(new ReadEntity(tbl));
-      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_METADATA_ONLY));
+      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
       Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
       if (part == null) {
@@ -1318,7 +1320,7 @@ public class DDLTask extends Task<DDLWor
         throw new HiveException(e);
       }
       work.getInputs().add(new ReadEntity(part));
-      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_METADATA_ONLY));
+      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
     }
     return 0;
   }
@@ -3817,19 +3819,20 @@ public class DDLTask extends Task<DDLWor
     // contains the new table. This is needed for rename - both the old and the
     // new table names are
     // passed
+    // Don't acquire locks for any of these; we have already asked for them in DDLSemanticAnalyzer.
     if(part != null) {
       work.getInputs().add(new ReadEntity(part));
-      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL));
+      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
     }
     else if (allPartitions != null ){
       for (Partition tmpPart: allPartitions) {
         work.getInputs().add(new ReadEntity(tmpPart));
-        work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL));
+        work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK));
       }
     }
     else {
       work.getInputs().add(new ReadEntity(oldTbl));
-      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));
+      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     }
     return 0;
   }
@@ -3867,7 +3870,8 @@ public class DDLTask extends Task<DDLWor
         dropTbl.getPartSpecs(), true, dropTbl.getIgnoreProtection(), true);
     for (Partition partition : droppedParts) {
       console.printInfo("Dropped the partition " + partition.getName());
-      work.getOutputs().add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
+      // We have already locked the table, so don't lock the partitions.
+      work.getOutputs().add(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK));
     };
   }
 
@@ -3921,7 +3925,8 @@ public class DDLTask extends Task<DDLWor
     // drop the table
     db.dropTable(dropTbl.getTableName());
     if (tbl != null) {
-      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));
+      // We have already locked the table in DDLSemanticAnalyzer, so don't do it again here
+      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     }
   }
 
@@ -4196,7 +4201,7 @@ public class DDLTask extends Task<DDLWor
 
     // create the table
     db.createTable(tbl, crtTbl.getIfNotExists());
-    work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));
+    work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     return 0;
   }
 
@@ -4304,7 +4309,7 @@ public class DDLTask extends Task<DDLWor
 
     // create the table
     db.createTable(tbl, crtTbl.getIfNotExists());
-    work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));
+    work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     return 0;
   }
 
@@ -4340,7 +4345,7 @@ public class DDLTask extends Task<DDLWor
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
       }
-      work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL));
+      work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
       // create new view
       Table tbl = db.newTable(crtView.getViewName());
@@ -4367,7 +4372,7 @@ public class DDLTask extends Task<DDLWor
       }
 
       db.createTable(tbl, crtView.getIfNotExists());
-      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL));
+      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     }
     return 0;
   }
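
The DDLTask changes above are all instances of one rule: by the time the task runs, DDLSemanticAnalyzer has already requested whatever locks the statement needs, so outputs registered at execution time exist only for hooks and bookkeeping (e.g. replication), never for locking. Condensed, with entity standing in for whichever table or partition is at hand:

    // Execution-time outputs in DDLTask: bookkeeping only, never a lock request.
    work.getOutputs().add(new WriteEntity(entity, WriteEntity.WriteType.DDL_NO_LOCK));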

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java Fri Mar 28 16:11:25 2014
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 
 import java.io.Serializable;
 
@@ -35,10 +36,10 @@ public class WriteEntity extends Entity 
   private boolean isTempURI = false;
 
   public static enum WriteType {
-    DDL, // for use in DDL statements that will touch data,
-         // will result in an exclusive lock,
-    DDL_METADATA_ONLY, // for use in DDL statements that touch only
-                       // metadata and don't need a lock
+    DDL_EXCLUSIVE, // for use in DDL statements that require an exclusive lock,
+                   // such as dropping a table or partition
+    DDL_SHARED, // for use in DDL operations that only need a shared lock, such as creating a table
+    DDL_NO_LOCK, // for use in DDL statements that do not require a lock
     INSERT,
     INSERT_OVERWRITE,
     UPDATE,
@@ -147,4 +148,43 @@ public class WriteEntity extends Entity 
     return isTempURI;
   }
 
+  /**
+   * Determine the type of lock to request for a given alter table type.
+   * @param op Operation type from the alter table description
+   * @return the write type this should use.
+   */
+  public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTypes op) {
+    switch (op) {
+      case RENAMECOLUMN:
+      case ADDCLUSTERSORTCOLUMN:
+      case ADDFILEFORMAT:
+      case ADDSERDE:
+      case DROPPROPS:
+      case REPLACECOLS:
+      case ARCHIVE:
+      case UNARCHIVE:
+      case ALTERPROTECTMODE:
+      case ALTERPARTITIONPROTECTMODE:
+      case ALTERLOCATION:
+      case DROPPARTITION:
+      case RENAMEPARTITION:
+      case ADDSKEWEDBY:
+      case ALTERSKEWEDLOCATION:
+      case ALTERBUCKETNUM:
+      case ALTERPARTITION:
+      case ADDCOLS:
+      case RENAME:  return WriteType.DDL_EXCLUSIVE;
+
+      case ADDPARTITION:
+      case ADDSERDEPROPS:
+      case ADDPROPS: return WriteType.DDL_SHARED;
+
+      case COMPACT:
+      case TOUCH: return WriteType.DDL_NO_LOCK;
+
+      default:
+        throw new RuntimeException("Unknown operation " + op.toString());
+    }
+  }
+
 }
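
Callers pick the lock level through this helper; a minimal sketch of the intended call pattern, mirroring the addInputsOutputsAlterTable change in DDLSemanticAnalyzer below, with tab and desc as the table and AlterTableDesc in scope:

    // Fall back to exclusive when there is no descriptor to inspect.
    WriteEntity.WriteType writeType = (desc == null)
        ? WriteEntity.WriteType.DDL_EXCLUSIVE
        : WriteEntity.determineAlterTableWriteType(desc.getOp());
    outputs.add(new WriteEntity(tab, writeType));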

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java Fri Mar 28 16:11:25 2014
@@ -135,13 +135,13 @@ public class DbTxnManager extends HiveTx
       Table t = null;
       LOG.debug("output is null " + (output == null));
       switch (output.getWriteType()) {
-        case DDL:
+        case DDL_EXCLUSIVE:
         case INSERT_OVERWRITE:
           compBuilder.setExclusive();
           break;
 
         case INSERT:
-        case DDL_METADATA_ONLY:
+        case DDL_SHARED:
           compBuilder.setShared();
           break;
 
@@ -150,6 +150,9 @@ public class DbTxnManager extends HiveTx
           compBuilder.setSemiShared();
           break;
 
+        case DDL_NO_LOCK:
+          continue; // No lock required here
+
         default:
           throw new RuntimeException("Unknown write type " +
               output.getWriteType().toString());
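
Taken together, the switch now resolves write types to lock components as follows (the case labels feeding setSemiShared are elided from this hunk):

    DDL_EXCLUSIVE, INSERT_OVERWRITE -> exclusive
    DDL_SHARED, INSERT              -> shared
    UPDATE, DELETE                  -> semi-shared
    DDL_NO_LOCK                     -> no lock component at all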

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Fri Mar 28 16:11:25 2014
@@ -615,7 +615,7 @@ public class DDLSemanticAnalyzer extends
 
   private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException {
     Database database = getDatabase(alterDesc.getDatabaseName());
-    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc), conf));
   }
 
@@ -768,12 +768,14 @@ public class DDLSemanticAnalyzer extends
       if (tableNames != null) {
         for (String tableName : tableNames) {
           Table table = getTable(dbName, tableName, true);
-          outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL));
+          // We want no lock here, as the database lock will cover the tables,
+          // and taking a lock here would actually cause us to deadlock on ourselves.
+          outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
         }
       }
     }
     inputs.add(new ReadEntity(database));
-    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE));
 
     DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc), conf));
@@ -799,7 +801,7 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tableName, throwException);
     if (tab != null) {
       inputs.add(new ReadEntity(tab));
-      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
     }
 
     DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists);
@@ -824,19 +826,19 @@ public class DDLSemanticAnalyzer extends
     Map<String, String> partSpec = getPartSpec((ASTNode) root.getChild(1));
     if (partSpec == null) {
       if (!table.isPartitioned()) {
-        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL));
+        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE));
       } else {
         for (Partition partition : getPartitions(table, null, false)) {
-          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
+          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
         }
       }
     } else {
       if (isFullSpec(table, partSpec)) {
         Partition partition = getPartition(table, partSpec, true);
-        outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
+        outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
       } else {
         for (Partition partition : getPartitions(table, partSpec, false)) {
-          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
+          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
         }
       }
     }
@@ -1373,19 +1375,22 @@ public class DDLSemanticAnalyzer extends
   private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
       AlterTableDesc desc) throws SemanticException {
     Table tab = getTable(tableName, true);
+    // Determine the lock type to acquire
+    WriteEntity.WriteType writeType = desc == null ? WriteEntity.WriteType.DDL_EXCLUSIVE :
+        WriteEntity.determineAlterTableWriteType(desc.getOp());
     if (partSpec == null || partSpec.isEmpty()) {
       inputs.add(new ReadEntity(tab));
-      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
+      outputs.add(new WriteEntity(tab, writeType));
     }
     else {
       inputs.add(new ReadEntity(tab));
       if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
         Partition part = getPartition(tab, partSpec, true);
-        outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL));
+        outputs.add(new WriteEntity(part, writeType));
       }
       else {
         for (Partition part : getPartitions(tab, partSpec, true)) {
-          outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL));
+          outputs.add(new WriteEntity(part, writeType));
         }
       }
     }
@@ -2648,7 +2653,7 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tblName, true);
     boolean isView = tab.isView();
     validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
-    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
+    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
 
     int numCh = ast.getChildCount();
     int start = ifNotExists ? 2 : 1;
@@ -2782,7 +2787,7 @@ public class DDLSemanticAnalyzer extends
       AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
           SessionState.get().getCurrentDatabase(), tblName, null,
           AlterTableDesc.AlterTableTypes.TOUCH);
-      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_METADATA_ONLY));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
           touchDesc), conf));
     } else {
@@ -3050,7 +3055,8 @@ public class DDLSemanticAnalyzer extends
         }
       }
       for (Partition p : parts) {
-        outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL));
+        // Don't request any locks here, as the table has already been locked.
+        outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_NO_LOCK));
       }
     }
   }
@@ -3118,7 +3124,7 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tableName, true);
 
     inputs.add(new ReadEntity(tab));
-    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
+    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
 
     validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java Fri Mar 28 16:11:25 2014
@@ -171,7 +171,7 @@ public class FunctionSemanticAnalyzer ex
       }
     }
     if (database != null) {
-      outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY));
+      outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
     }
   }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Fri Mar 28 16:11:25 2014
@@ -226,7 +226,7 @@ public class ImportSemanticAnalyzer exte
           loadTable(fromURI, table);
         }
         // Set this to read because we can't overwrite any existing partitions
-        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_METADATA_ONLY));
+        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
       } catch (InvalidTableException e) {
         LOG.debug("table " + tblDesc.getTableName() + " does not exist");
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java Fri Mar 28 16:11:25 2014
@@ -171,6 +171,6 @@ public class MacroSemanticAnalyzer exten
   private void addEntities() throws SemanticException {
     Database database = getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
     // This restricts macro creation to privileged users.
-    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
   }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Fri Mar 28 16:11:25 2014
@@ -8830,7 +8830,7 @@ public class SemanticAnalyzer extends Ba
       tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k);
 
      // set up WriteEntity for replication
-      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_METADATA_ONLY));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
 
       // add WriteEntity for each matching partition
       if (tab.isPartitioned()) {
@@ -8841,7 +8841,7 @@ public class SemanticAnalyzer extends Ba
         if (partitions != null) {
           for (Partition partn : partitions) {
             // inputs.add(new ReadEntity(partn)); // is this needed at all?
-            outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_METADATA_ONLY));
+            outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK));
           }
         }
       }
@@ -9945,7 +9945,7 @@ public class SemanticAnalyzer extends Ba
     String[] qualified = Hive.getQualifiedNames(tableName);
     String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0];
     Database database  = getDatabase(dbName);
-    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
     // Handle different types of CREATE TABLE command
     CreateTableDesc crtTblDesc = null;
     switch (command_type) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java Fri Mar 28 16:11:25 2014
@@ -252,9 +252,9 @@ public class HiveAuthorizationTaskFactor
       Table tbl = getTable(SessionState.get().getCurrentDatabase(), subject.getObject());
       if (subject.getPartSpec() != null) {
         Partition part = getPartition(tbl, subject.getPartSpec());
-        outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_METADATA_ONLY));
+        outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
       } else {
-        outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_METADATA_ONLY));
+        outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
       }
     }
 

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java?rev=1582792&r1=1582791&r2=1582792&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java Fri Mar 28 16:11:25 2014
@@ -181,8 +181,8 @@ public class TestDbTxnManager {
   }
 
   @Test
-  public void testDDL() throws Exception {
-    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL);
+  public void testDDLExclusive() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_EXCLUSIVE);
     QueryPlan qp = new MockQueryPlan(this);
     txnMgr.acquireLocks(qp, ctx, "fred");
     List<HiveLock> locks = ctx.getHiveLocks();
@@ -194,6 +194,30 @@ public class TestDbTxnManager {
     Assert.assertEquals(0, locks.size());
   }
 
+  @Test
+  public void testDDLShared() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_SHARED);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testDDLNoLock() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_NO_LOCK);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertNull(locks);
+  }
+
+
   @Before
   public void setUp() throws Exception {
     TxnDbUtil.prepDb();

Added: hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q?rev=1582792&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q Fri Mar 28 16:11:25 2014
@@ -0,0 +1,59 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create database D1;
+
+alter database D1 set dbproperties('test'='yesthisis');
+
+drop database D1;
+
+create table T1(key string, val string) stored as textfile;
+
+create table T2 like T1;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T3 as select * from T1;
+
+create table T4 (key char(10), val decimal(5,2), b int)
+    partitioned by (ds string)
+    clustered by (b) into 10 buckets
+    stored as orc;
+
+alter table T3 rename to newT3;
+
+alter table T2 set tblproperties ('test'='thisisatest');
+
+alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde';
+alter table T2 set serdeproperties ('test'='thisisatest');
+
+alter table T2 clustered by (key) into 32 buckets;
+
+alter table T4 add partition (ds='today'); 
+
+alter table T4 partition (ds='today') rename to partition(ds='yesterday');
+
+alter table T4 drop partition (ds='yesterday');
+
+alter table T4 add partition (ds='tomorrow'); 
+
+create table T5 (a string, b int);
+alter table T5 set fileformat orc;
+
+create table T7 (a string, b int);
+alter table T7 set location 'file:///tmp';
+
+alter table T2 touch;
+alter table T4 touch partition (ds='tomorrow');
+
+create view V1 as select key from T1;
+alter view V1 set tblproperties ('test'='thisisatest');
+drop view V1;
+
+
+
+drop table T1;
+drop table T2;
+drop table newT3;
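
The golden output below comes from running this file through the CLI driver; assuming the maven qtest layout of this era (command not part of this commit), that would be done from itests with something like mvn test -Dtest=TestCliDriver -Dqfile=dbtxnmgr_ddl1.q -Phadoop-2.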

Added: hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out?rev=1582792&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out Fri Mar 28 16:11:25 2014
@@ -0,0 +1,242 @@
+PREHOOK: query: create database D1
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database D1
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: alter database D1 set dbproperties('test'='yesthisis')
+PREHOOK: type: ALTERDATABASE
+PREHOOK: Output: database:d1
+POSTHOOK: query: alter database D1 set dbproperties('test'='yesthisis')
+POSTHOOK: type: ALTERDATABASE
+POSTHOOK: Output: database:d1
+PREHOOK: query: drop database D1
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:d1
+PREHOOK: Output: database:d1
+POSTHOOK: query: drop database D1
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:d1
+POSTHOOK: Output: database:d1
+PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: create table T2 like T1
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T2 like T1
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: create table T3 as select * from T1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+POSTHOOK: query: create table T3 as select * from T1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@T3
+PREHOOK: query: create table T4 (key char(10), val decimal(5,2), b int)
+    partitioned by (ds string)
+    clustered by (b) into 10 buckets
+    stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T4 (key char(10), val decimal(5,2), b int)
+    partitioned by (ds string)
+    clustered by (b) into 10 buckets
+    stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T4
+PREHOOK: query: alter table T3 rename to newT3
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: default@t3
+PREHOOK: Output: default@t3
+POSTHOOK: query: alter table T3 rename to newT3
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: default@t3
+POSTHOOK: Output: default@newT3
+POSTHOOK: Output: default@t3
+PREHOOK: query: alter table T2 set tblproperties ('test'='thisisatest')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: alter table T2 set tblproperties ('test'='thisisatest')
+POSTHOOK: type: ALTERTABLE_PROPERTIES
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+PREHOOK: query: alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
+POSTHOOK: type: ALTERTABLE_SERIALIZER
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+PREHOOK: query: alter table T2 set serdeproperties ('test'='thisisatest')
+PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: alter table T2 set serdeproperties ('test'='thisisatest')
+POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+PREHOOK: query: alter table T2 clustered by (key) into 32 buckets
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: alter table T2 clustered by (key) into 32 buckets
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+PREHOOK: query: alter table T4 add partition (ds='today')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@t4
+POSTHOOK: query: alter table T4 add partition (ds='today')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4@ds=today
+PREHOOK: query: alter table T4 partition (ds='today') rename to partition(ds='yesterday')
+PREHOOK: type: ALTERTABLE_RENAMEPART
+PREHOOK: Input: default@t4
+PREHOOK: Output: default@t4@ds=today
+POSTHOOK: query: alter table T4 partition (ds='today') rename to partition(ds='yesterday')
+POSTHOOK: type: ALTERTABLE_RENAMEPART
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@ds=today
+POSTHOOK: Output: default@t4@ds=today
+POSTHOOK: Output: default@t4@ds=yesterday
+PREHOOK: query: alter table T4 drop partition (ds='yesterday')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@t4
+PREHOOK: Output: default@t4@ds=yesterday
+POSTHOOK: query: alter table T4 drop partition (ds='yesterday')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@t4
+POSTHOOK: Output: default@t4@ds=yesterday
+PREHOOK: query: alter table T4 add partition (ds='tomorrow')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@t4
+POSTHOOK: query: alter table T4 add partition (ds='tomorrow')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4@ds=tomorrow
+PREHOOK: query: create table T5 (a string, b int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T5 (a string, b int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T5
+PREHOOK: query: alter table T5 set fileformat orc
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@t5
+PREHOOK: Output: default@t5
+POSTHOOK: query: alter table T5 set fileformat orc
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@t5
+POSTHOOK: Output: default@t5
+PREHOOK: query: create table T7 (a string, b int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T7 (a string, b int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T7
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_LOCATION
+PREHOOK: Input: default@t7
+PREHOOK: Output: default@t7
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_LOCATION
+POSTHOOK: Input: default@t7
+POSTHOOK: Output: default@t7
+#### A masked pattern was here ####
+PREHOOK: query: alter table T2 touch
+PREHOOK: type: ALTERTABLE_TOUCH
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: alter table T2 touch
+POSTHOOK: type: ALTERTABLE_TOUCH
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+PREHOOK: query: alter table T4 touch partition (ds='tomorrow')
+PREHOOK: type: ALTERTABLE_TOUCH
+PREHOOK: Input: default@t4
+PREHOOK: Output: default@t4@ds=tomorrow
+POSTHOOK: query: alter table T4 touch partition (ds='tomorrow')
+POSTHOOK: type: ALTERTABLE_TOUCH
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@ds=tomorrow
+POSTHOOK: Output: default@t4@ds=tomorrow
+PREHOOK: query: create view V1 as select key from T1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@t1
+POSTHOOK: query: create view V1 as select key from T1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@V1
+PREHOOK: query: alter view V1 set tblproperties ('test'='thisisatest')
+PREHOOK: type: ALTERVIEW_PROPERTIES
+PREHOOK: Input: default@v1
+PREHOOK: Output: default@v1
+POSTHOOK: query: alter view V1 set tblproperties ('test'='thisisatest')
+POSTHOOK: type: ALTERVIEW_PROPERTIES
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: default@v1
+PREHOOK: query: drop view V1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v1
+PREHOOK: Output: default@v1
+POSTHOOK: query: drop view V1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: default@v1
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+PREHOOK: query: drop table newT3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@newt3
+PREHOOK: Output: default@newt3
+POSTHOOK: query: drop table newT3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@newt3
+POSTHOOK: Output: default@newt3