Posted to commits@hive.apache.org by rh...@apache.org on 2014/03/26 17:52:04 UTC

svn commit: r1581931 - in /hive/trunk: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/ ql/src/java/org/apache/hadoop/hive/ql/metadata/ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/

Author: rhbutani
Date: Wed Mar 26 16:52:03 2014
New Revision: 1581931

URL: http://svn.apache.org/r1581931
Log:
HIVE-6499 Using Metastore-side Auth errors on non-resolvable IF/OF/SerDe (Sushanth Sowmyan via Thejas Nair)
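
The gist of the problem: with metastore-side authorization enabled, the
AuthorizationPreEventListener wraps the thrift api.Table in a
ql.metadata.Table, whose initialization used to eagerly resolve the
input/output format and SerDe classes. On the metastore those classes may
not be on the classpath, so the wrapping itself failed. A simplified sketch
of the pre-patch failure path (not the exact code):

    // Wrapping the api.Table for authorization eagerly loaded the classes:
    Table tbl = new org.apache.hadoop.hive.ql.metadata.Table(apiTable);
    //   -> initialize(apiTable)
    //   -> getInputFormatClass() / getOutputFormatClass() / getDeserializer()
    //   -> Class.forName("org.apache.hive.dummy.DoesNotExistInputFormat", ...)
    //   -> failure surfaces as an error during authorization

The fix below makes that initialization lazy and adds TableWrapper /
PartitionWrapper so the listener can construct the ql-side objects without
touching those classes.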

Modified:
    hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java

Modified: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java?rev=1581931&r1=1581930&r2=1581931&view=diff
==============================================================================
--- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java (original)
+++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java Wed Mar 26 16:52:03 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.securi
 import java.io.IOException;
 import java.net.ServerSocket;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 
 import junit.framework.TestCase;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext;
@@ -114,7 +116,7 @@ public class TestAuthorizationPreEventLi
   }
 
   private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
-    validatePartition(expectedPartition,actualPartition);
+    validatePartition(expectedPartition, actualPartition);
   }
 
   private void validatePartition(Partition expectedPartition, Partition actualPartition) {
@@ -254,7 +256,6 @@ public class TestAuthorizationPreEventLi
         renamedTable);
     assertFalse(tbl.getTableName().equals(renamedTable.getTableName()));
 
-
     //change the table name back
     driver.run(String.format("alter table %s rename to %s", renamed, tblName));
     listSize++;
@@ -281,6 +282,60 @@ public class TestAuthorizationPreEventLi
 
     validateDropTable(tbl, tableFromDropTableEvent);
 
+    // verify that we can create a table whose IF/OF/SerDe point to custom classes that do not exist
+    Table tCustom = tbl.deepCopy();
+    tCustom.getSd().setInputFormat("org.apache.hive.dummy.DoesNotExistInputFormat");
+    tCustom.getSd().setOutputFormat("org.apache.hive.dummy.DoesNotExistOutputFormat");
+    if (tCustom.getSd().getSerdeInfo() == null){
+      tCustom.getSd().setSerdeInfo(
+          new SerDeInfo(
+              "dummy"
+              ,"org.apache.hive.dummy.DoesNotExistSerDe"
+              , new HashMap<String,String>()
+          )
+      );
+    } else {
+      tCustom.getSd().getSerdeInfo().setSerializationLib(
+          "org.apache.hive.dummy.DoesNotExistSerDe");
+    }
+
+    tCustom.setTableName(tbl.getTableName() + "_custom");
+    msc.createTable(tCustom);
+    listSize++;
+
+    Table customCreatedTable = msc.getTable(tCustom.getDbName(), tCustom.getTableName());
+    Table customCreatedTableFromEvent = (
+        (org.apache.hadoop.hive.ql.metadata.Table)
+            assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+                DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE))
+        .getTTable();
+
+    validateCreateTable(tCustom, customCreatedTable);
+    validateCreateTable(tCustom, customCreatedTableFromEvent);
+
+    assertEquals(tCustom.getSd().getInputFormat(),
+        customCreatedTable.getSd().getInputFormat());
+    assertEquals(tCustom.getSd().getOutputFormat(),
+        customCreatedTable.getSd().getOutputFormat());
+    assertEquals(tCustom.getSd().getSerdeInfo().getSerializationLib(),
+        customCreatedTable.getSd().getSerdeInfo().getSerializationLib());
+    assertEquals(tCustom.getSd().getInputFormat(),
+        customCreatedTableFromEvent.getSd().getInputFormat());
+    assertEquals(tCustom.getSd().getOutputFormat(),
+        customCreatedTableFromEvent.getSd().getOutputFormat());
+    assertEquals(tCustom.getSd().getSerdeInfo().getSerializationLib(),
+        customCreatedTableFromEvent.getSd().getSerdeInfo().getSerializationLib());
+
+    msc.dropTable(tCustom.getDbName(), tCustom.getTableName());
+    listSize++;
+    Table table2FromDropTableEvent = (
+        (org.apache.hadoop.hive.ql.metadata.Table)
+            assertAndExtractSingleObjectFromEvent(listSize, authCalls,
+                DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE))
+        .getTTable();
+
+    validateDropTable(tCustom, table2FromDropTableEvent);
+
     driver.run("drop database " + dbName);
     listSize++;
     Database dbFromDropDatabaseEvent =
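
In other words, the new test asserts that a sequence like the following now
succeeds against a metastore running this listener, where it previously
failed while building the authorization objects (sketch only; assumes an
api.Table t with a populated SD and a connected HiveMetaStoreClient msc, as
in the test above):

    t.getSd().setInputFormat("org.apache.hive.dummy.DoesNotExistInputFormat");
    t.getSd().setOutputFormat("org.apache.hive.dummy.DoesNotExistOutputFormat");
    t.getSd().getSerdeInfo().setSerializationLib(
        "org.apache.hive.dummy.DoesNotExistSerDe");
    msc.createTable(t);   // auth event fires; no class resolution is attempted
    msc.dropTable(t.getDbName(), t.getTableName());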

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1581931&r1=1581930&r2=1581931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Wed Mar 26 16:52:03 2014
@@ -178,7 +178,7 @@ public class Partition implements Serial
    * @throws HiveException
    *           Thrown if we cannot initialize the partition
    */
-  private void initialize(Table table,
+  protected void initialize(Table table,
       org.apache.hadoop.hive.metastore.api.Partition tPartition) throws HiveException {
 
     this.table = table;
@@ -213,11 +213,13 @@ public class Partition implements Serial
       }
     }
 
-    // This will set up field: inputFormatClass
-    getInputFormatClass();
-    // This will set up field: outputFormatClass
-    getOutputFormatClass();
-    getDeserializer();
+    // Note that we do not eagerly initialize fields like inputFormatClass,
+    // outputFormatClass and deserializer here. This class is also used on
+    // the metastore side, where the classes those fields refer to may not
+    // be on the classpath, so attempting to load them there could fail.
+    // Eager initialization would only serve to pre-cache them for
+    // performance, and since the accessors already null-check and load
+    // them lazily, skipping it here is not a concern.
   }
 
   public String getName() {
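
The comment above leans on the accessors doing their own null/cache check; a
minimal sketch of that pattern (simplified from the real
Partition.getInputFormatClass(), which also falls back to the table's input
format and has slightly different error handling):

    public Class<? extends InputFormat> getInputFormatClass() throws HiveException {
      if (inputFormatClass == null) {
        String clsName = tPartition.getSd().getInputFormat();
        try {
          // resolve and cache on first access only
          inputFormatClass = (Class<? extends InputFormat>)
              Class.forName(clsName, true, JavaUtils.getClassLoader());
        } catch (ClassNotFoundException e) {
          throw new HiveException("Class not found: " + clsName, e);
        }
      }
      return inputFormatClass;
    }

The class is resolved only when something actually asks for it, so a
metastore that never reads the field never pays (or fails) the class load.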

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1581931&r1=1581930&r2=1581931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Wed Mar 26 16:52:03 2014
@@ -96,13 +96,19 @@ public class Table implements Serializab
   }
 
   public Table(org.apache.hadoop.hive.metastore.api.Table table) {
+    initialize(table);
+  }
+
+  // Do initialization here, outside the ctor, so that subclasses can reuse it.
+  protected void initialize(org.apache.hadoop.hive.metastore.api.Table table) {
     tTable = table;
-    if (!isView()) {
-      // This will set up field: inputFormatClass
-      getInputFormatClass();
-      // This will set up field: outputFormatClass
-      getOutputFormatClass();
-    }
+    // Note that we do not eagerly initialize fields like inputFormatClass,
+    // outputFormatClass and deserializer here. This class is also used on
+    // the metastore side, where the classes those fields refer to may not
+    // be on the classpath, so attempting to load them there could fail.
+    // Eager initialization would only serve to pre-cache them for
+    // performance, and since the accessors already null-check and load
+    // them lazily, skipping it here is not a concern.
   }
 
   public Table(String databaseName, String tableName) {
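
Making initialize() protected, rather than doing the work in the ctor, is
what lets the wrapper classes later in this commit build a ql.metadata.Table
directly from a fixed-up api.Table. Schematically (hypothetical subclass;
the real one is TableWrapper in AuthorizationPreEventListener.java below):

    class LazyApiTable extends org.apache.hadoop.hive.ql.metadata.Table {
      LazyApiTable(org.apache.hadoop.hive.metastore.api.Table apiTable) {
        // adjust the api.Table first if needed (e.g. infer a missing
        // table type), then hand it to the protected initializer
        initialize(apiTable);
      }
    }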

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java?rev=1581931&r1=1581930&r2=1581931&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java Wed Mar 26 16:52:03 2014
@@ -41,8 +41,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.security.HiveMetastoreAuthenticationProvider;
 
@@ -185,7 +183,7 @@ public class AuthorizationPreEventListen
   private void authorizeCreateTable(PreCreateTableEvent context)
       throws InvalidOperationException, MetaException {
     try {
-      tAuthorizer.get().authorize(getTableFromApiTable(context.getTable()),
+      tAuthorizer.get().authorize(new TableWrapper(context.getTable()),
           HiveOperation.CREATETABLE.getInputRequiredPrivileges(),
           HiveOperation.CREATETABLE.getOutputRequiredPrivileges());
     } catch (AuthorizationException e) {
@@ -198,7 +196,7 @@ public class AuthorizationPreEventListen
   private void authorizeDropTable(PreDropTableEvent context)
       throws InvalidOperationException, MetaException {
     try {
-      tAuthorizer.get().authorize(getTableFromApiTable(context.getTable()),
+      tAuthorizer.get().authorize(new TableWrapper(context.getTable()),
           HiveOperation.DROPTABLE.getInputRequiredPrivileges(),
           HiveOperation.DROPTABLE.getOutputRequiredPrivileges());
     } catch (AuthorizationException e) {
@@ -211,7 +209,7 @@ public class AuthorizationPreEventListen
   private void authorizeAlterTable(PreAlterTableEvent context)
       throws InvalidOperationException, MetaException {
     try {
-      tAuthorizer.get().authorize(getTableFromApiTable(context.getOldTable()),
+      tAuthorizer.get().authorize(new TableWrapper(context.getOldTable()),
           null,
           new Privilege[]{Privilege.ALTER_METADATA});
     } catch (AuthorizationException e) {
@@ -225,7 +223,7 @@ public class AuthorizationPreEventListen
       throws InvalidOperationException, MetaException {
     try {
       for (org.apache.hadoop.hive.metastore.api.Partition mapiPart : context.getPartitions()) {
-        tAuthorizer.get().authorize(getPartitionFromApiPartition(mapiPart, context),
+        tAuthorizer.get().authorize(new PartitionWrapper(mapiPart, context),
             HiveOperation.ALTERTABLE_ADDPARTS.getInputRequiredPrivileges(),
             HiveOperation.ALTERTABLE_ADDPARTS.getOutputRequiredPrivileges());
       }
@@ -242,7 +240,7 @@ public class AuthorizationPreEventListen
       throws InvalidOperationException, MetaException {
     try {
       org.apache.hadoop.hive.metastore.api.Partition mapiPart = context.getPartition();
-      tAuthorizer.get().authorize(getPartitionFromApiPartition(mapiPart, context),
+      tAuthorizer.get().authorize(new PartitionWrapper(mapiPart, context),
           HiveOperation.ALTERTABLE_DROPPARTS.getInputRequiredPrivileges(),
           HiveOperation.ALTERTABLE_DROPPARTS.getOutputRequiredPrivileges());
     } catch (AuthorizationException e) {
@@ -258,7 +256,7 @@ public class AuthorizationPreEventListen
       throws InvalidOperationException, MetaException {
     try {
       org.apache.hadoop.hive.metastore.api.Partition mapiPart = context.getNewPartition();
-      tAuthorizer.get().authorize(getPartitionFromApiPartition(mapiPart, context),
+      tAuthorizer.get().authorize(new PartitionWrapper(mapiPart, context),
           null,
           new Privilege[]{Privilege.ALTER_METADATA});
     } catch (AuthorizationException e) {
@@ -270,41 +268,6 @@ public class AuthorizationPreEventListen
     }
   }
 
-  private Table getTableFromApiTable(org.apache.hadoop.hive.metastore.api.Table apiTable) {
-    org.apache.hadoop.hive.metastore.api.Table tTable = apiTable.deepCopy();
-    if (tTable.getTableType() == null){
-      // TableType specified was null, we need to figure out what type it was.
-      if (MetaStoreUtils.isExternalTable(tTable)){
-        tTable.setTableType(TableType.EXTERNAL_TABLE.toString());
-      } else if (MetaStoreUtils.isIndexTable(tTable)) {
-        tTable.setTableType(TableType.INDEX_TABLE.toString());
-      } else if ((tTable.getSd() == null) || (tTable.getSd().getLocation() == null)) {
-        tTable.setTableType(TableType.VIRTUAL_VIEW.toString());
-      } else {
-        tTable.setTableType(TableType.MANAGED_TABLE.toString());
-      }
-    }
-    Table tbl = new Table(tTable);
-    return tbl;
-  }
-
-  private Partition getPartitionFromApiPartition(
-      org.apache.hadoop.hive.metastore.api.Partition mapiPart,
-      PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
-    org.apache.hadoop.hive.metastore.api.Partition tPart = mapiPart.deepCopy();
-    org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table(
-        mapiPart.getDbName(), mapiPart.getTableName());
-    if (tPart.getSd() == null){
-      // In the cases of create partition, by the time this event fires, the partition
-      // object has not yet come into existence, and thus will not yet have a
-      // location or an SD, but these are needed to create a ql.metadata.Partition,
-      // so we use the table's SD. The only place this is used is by the
-      // authorization hooks, so we will not affect code flow in the metastore itself.
-      tPart.setSd(t.getSd());
-    }
-    return new Partition(getTableFromApiTable(t),tPart);
-  }
-
   private InvalidOperationException invalidOperationException(Exception e) {
     InvalidOperationException ex = new InvalidOperationException(e.getMessage());
     ex.initCause(e.getCause());
@@ -317,4 +280,50 @@ public class AuthorizationPreEventListen
     return ex;
   }
 
+  // Wrapper that extends ql.metadata.Table to allow direct construction from a metastore api.Table
+  public static class TableWrapper extends org.apache.hadoop.hive.ql.metadata.Table {
+
+    public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
+      org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
+      if (wrapperApiTable.getTableType() == null){
+        // TableType specified was null, we need to figure out what type it was.
+        if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
+          wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
+        } else if (MetaStoreUtils.isIndexTable(wrapperApiTable)) {
+          wrapperApiTable.setTableType(TableType.INDEX_TABLE.toString());
+        } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
+          wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
+        } else {
+          wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
+        }
+      }
+      initialize(wrapperApiTable);
+    }
+  }
+
+  // Wrapper that extends ql.metadata.Partition to allow direct construction from a metastore api.Partition
+  public static class PartitionWrapper extends org.apache.hadoop.hive.ql.metadata.Partition {
+
+    public PartitionWrapper(org.apache.hadoop.hive.ql.metadata.Table table,
+        org.apache.hadoop.hive.metastore.api.Partition mapiPart) throws HiveException {
+      initialize(table, mapiPart);
+    }
+
+    public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
+        PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
+      org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = mapiPart.deepCopy();
+      org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table(
+          mapiPart.getDbName(), mapiPart.getTableName());
+      if (wrapperApiPart.getSd() == null){
+        // In the case of create partition, by the time this event fires, the partition
+        // object has not yet come into existence, and thus will not yet have a
+        // location or an SD, but these are needed to create a ql.metadata.Partition,
+        // so we use the table's SD. The only place this is used is by the
+        // authorization hooks, so we will not affect code flow in the metastore itself.
+        wrapperApiPart.setSd(t.getSd());
+      }
+      initialize(new TableWrapper(t), wrapperApiPart);
+    }
+  }
+
 }