Posted to commits@sentry.apache.org by sr...@apache.org on 2014/06/10 00:00:21 UTC

git commit: SENTRY-259: Implement Hive metastore plugin (Prasad Mujumdar via Sravya Tirukkovalur)

Repository: incubator-sentry
Updated Branches:
  refs/heads/master 9afc663d0 -> c04138d38


SENTRY-259: Implement Hive metastore plugin (Prasad Mujumdar via Sravya Tirukkovalur)


Project: http://git-wip-us.apache.org/repos/asf/incubator-sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-sentry/commit/c04138d3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-sentry/tree/c04138d3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-sentry/diff/c04138d3

Branch: refs/heads/master
Commit: c04138d38fc4cb6ba23d8220bb40964fed653cc2
Parents: 9afc663
Author: Sravya Tirukkovalur <sr...@clouera.com>
Authored: Mon Jun 9 14:59:45 2014 -0700
Committer: Sravya Tirukkovalur <sr...@clouera.com>
Committed: Mon Jun 9 14:59:45 2014 -0700

----------------------------------------------------------------------
 .../binding/hive/authz/HiveAuthzBinding.java    |   2 +-
 .../sentry/binding/hive/conf/HiveAuthzConf.java |   1 +
 .../metastore/MetastoreAuthzBinding.java        | 338 ++++++++++++++++
 .../sentry/service/thrift/SentryService.java    |   2 +-
 .../thrift/SentryServiceIntegrationBase.java    |   1 -
 .../e2e/dbprovider/PolicyProviderForTest.java   |   2 +-
 .../TestDbSentryOnFailureHookLoading.java       |  12 +-
 .../AbstractTestWithStaticConfiguration.java    |  13 +-
 .../apache/sentry/tests/e2e/hive/Context.java   |  26 +-
 .../hive/TestSentryOnFailureHookLoading.java    |   4 +-
 .../sentry/tests/e2e/hive/fs/MiniDFS.java       |  32 +-
 .../e2e/hive/hiveserver/HiveServerFactory.java  |  44 ++-
 .../hiveserver/InternalMetastoreServer.java     |  81 ++++
 ...actMetastoreTestWithStaticConfiguration.java | 156 ++++++++
 .../e2e/metastore/TestMetastoreEndToEnd.java    | 387 +++++++++++++++++++
 .../src/test/resources/core-site.xml            |  33 ++
 16 files changed, 1108 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java
index 63484a8..39f5384 100644
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/authz/HiveAuthzBinding.java
@@ -258,7 +258,7 @@ public class HiveAuthzBinding {
             requiredOutputPrivileges.get(getAuthzType(outputHierarchy));
           if (!authProvider.hasAccess(subject, outputHierarchy, outputPrivSet, activeRoleSet)) {
             throw new AuthorizationException("User " + subject.getName() +
-                " does not have priviliedges for " + hiveOp.name());
+                " does not have privileges for " + hiveOp.name());
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
index 7b7bf8e..c126743 100644
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
@@ -63,6 +63,7 @@ public class HiveAuthzConf extends Configuration {
     AUTHZ_UDF_WHITELIST("sentry.hive.udf.whitelist", HIVE_UDF_WHITE_LIST),
     AUTHZ_ALLOW_HIVE_IMPERSONATION("sentry.hive.allow.hive.impersonation", "false"),
     AUTHZ_ONFAILURE_HOOKS("sentry.hive.failure.hooks", ""),
+    AUTHZ_METASTORE_SERVICE_USERS("sentry.metastore.service.users", ""),
 
     AUTHZ_PROVIDER_DEPRECATED("hive.sentry.provider",
       "org.apache.sentry.provider.file.ResourceAuthorizationProvider"),

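The new sentry.metastore.service.users key holds a comma-separated list of
trusted service users. The metastore plugin added below normalizes each entry
before comparing it with the caller's short user name; a minimal sketch of
that parsing in isolation (the surrounding code is illustrative, the
normalization mirrors the plugin's constructor):

    import java.util.Set;
    import org.apache.hadoop.conf.Configuration;
    import com.google.common.collect.Sets;

    // Entries are split on commas, trimmed, and lower-cased; callers whose
    // short user name appears in this set bypass the authorization check.
    Configuration conf = new Configuration();
    Set<String> serviceUsers = Sets.newHashSet();
    for (String user : conf.getStrings("sentry.metastore.service.users", "")) {
      serviceUsers.add(user.trim().toLowerCase());
    }
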
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java
new file mode 100644
index 0000000..2737793
--- /dev/null
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBinding.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.binding.metastore;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.sentry.binding.hive.authz.HiveAuthzBinding;
+import org.apache.sentry.binding.hive.authz.HiveAuthzPrivilegesMap;
+import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
+import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars;
+import org.apache.sentry.core.common.Subject;
+import org.apache.sentry.core.model.db.AccessURI;
+import org.apache.sentry.core.model.db.DBModelAuthorizable;
+import org.apache.sentry.core.model.db.Database;
+import org.apache.sentry.core.model.db.Server;
+import org.apache.sentry.core.model.db.Table;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+/**
+ * Sentry binding for the Hive Metastore. The binding is integrated into the
+ * Metastore via pre-event listeners, which are fired prior to executing the
+ * metadata action. At this point only metadata writes are authorized, since
+ * the listeners are not fired for read events. Each action builds input and
+ * output hierarchies from the objects used in the given operation. These are
+ * then passed down to the Hive binding, which handles the authorization. This
+ * ensures that we follow the same privilege model and policies.
+ */
+public class MetastoreAuthzBinding extends MetaStorePreEventListener {
+
+  /**
+   * Build the set of object hierarchies, i.e. fully qualified db model objects
+   */
+  private static class HierarcyBuilder {
+    private List<List<DBModelAuthorizable>> authHierarchy;
+
+    public HierarcyBuilder() {
+      authHierarchy = new ArrayList<List<DBModelAuthorizable>>();
+    }
+
+    public HierarcyBuilder addServerToOutput(Server server) {
+      List<DBModelAuthorizable> serverHierarchy = new ArrayList<DBModelAuthorizable>();
+      serverHierarchy.add(server);
+      authHierarchy.add(serverHierarchy);
+      return this;
+    }
+
+    public HierarcyBuilder addDbToOutput(Server server, String dbName) {
+      List<DBModelAuthorizable> dbHierarchy = new ArrayList<DBModelAuthorizable>();
+      dbHierarchy.add(server);
+      dbHierarchy.add(new Database(dbName));
+      authHierarchy.add(dbHierarchy);
+      return this;
+    }
+
+    public HierarcyBuilder addUriToOutput(Server server, String uriPath) {
+      List<DBModelAuthorizable> uriHierarchy = new ArrayList<DBModelAuthorizable>();
+      uriHierarchy.add(server);
+      uriHierarchy.add(new AccessURI(uriPath));
+      authHierarchy.add(uriHierarchy);
+      return this;
+    }
+
+    public HierarcyBuilder addTableToOutput(Server server, String dbName,
+        String tableName) {
+      List<DBModelAuthorizable> tableHierarchy = new ArrayList<DBModelAuthorizable>();
+      tableHierarchy.add(server);
+      tableHierarchy.add(new Database(dbName));
+      tableHierarchy.add(new Table(tableName));
+      authHierarchy.add(tableHierarchy);
+      return this;
+    }
+
+    public List<List<DBModelAuthorizable>> build() {
+      return authHierarchy;
+    }
+  }
+
+  private HiveAuthzConf authzConf;
+  private final Server authServer;
+  private final HiveConf hiveConf;
+  private final ImmutableSet<String> serviceUsers;
+  private HiveAuthzBinding hiveAuthzBinding;
+
+  public MetastoreAuthzBinding(Configuration config) throws Exception {
+    super(config);
+    String hiveAuthzConf = config.get(HiveAuthzConf.HIVE_SENTRY_CONF_URL);
+    if (hiveAuthzConf == null
+        || (hiveAuthzConf = hiveAuthzConf.trim()).isEmpty()) {
+      throw new IllegalArgumentException("Configuration key "
+          + HiveAuthzConf.HIVE_SENTRY_CONF_URL + " value '" + hiveAuthzConf
+          + "' is invalid.");
+    }
+    try {
+      authzConf = new HiveAuthzConf(new URL(hiveAuthzConf));
+    } catch (MalformedURLException e) {
+      throw new IllegalArgumentException("Configuration key "
+          + HiveAuthzConf.HIVE_SENTRY_CONF_URL + " specifies a malformed URL '"
+          + hiveAuthzConf + "'", e);
+    }
+    hiveConf = new HiveConf(config, this.getClass());
+    this.authServer = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME
+        .getVar()));
+    serviceUsers = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(authzConf
+        .getStrings(AuthzConfVars.AUTHZ_METASTORE_SERVICE_USERS.getVar(),
+            new String[] { "" }))));
+  }
+
+  /**
+   * Main listener callback which is the entry point for Sentry
+   */
+  @Override
+  public void onEvent(PreEventContext context) throws MetaException,
+      NoSuchObjectException, InvalidOperationException {
+
+    switch (context.getEventType()) {
+    case CREATE_TABLE:
+      authorizeCreateTable((PreCreateTableEvent) context);
+      break;
+    case DROP_TABLE:
+      authorizeDropTable((PreDropTableEvent) context);
+      break;
+    case ALTER_TABLE:
+      authorizeAlterTable((PreAlterTableEvent) context);
+      break;
+    case ADD_PARTITION:
+      authorizeAddPartition((PreAddPartitionEvent) context);
+      break;
+    case DROP_PARTITION:
+      authorizeDropPartition((PreDropPartitionEvent) context);
+      break;
+    case ALTER_PARTITION:
+      authorizeAlterPartition((PreAlterPartitionEvent) context);
+      break;
+    case CREATE_DATABASE:
+      authorizeCreateDatabase((PreCreateDatabaseEvent) context);
+      break;
+    case DROP_DATABASE:
+      authorizeDropDatabase((PreDropDatabaseEvent) context);
+      break;
+    case LOAD_PARTITION_DONE:
+      // noop for now
+      break;
+    default:
+      break;
+    }
+  }
+
+  private void authorizeCreateDatabase(PreCreateDatabaseEvent context)
+      throws InvalidOperationException, MetaException {
+    authorizeMetastoreAccess(HiveOperation.CREATEDATABASE,
+        new HierarcyBuilder().build(),
+        new HierarcyBuilder().addServerToOutput(getAuthServer()).build());
+  }
+
+  private void authorizeDropDatabase(PreDropDatabaseEvent context)
+      throws InvalidOperationException, MetaException {
+    authorizeMetastoreAccess(HiveOperation.DROPDATABASE,
+        new HierarcyBuilder().build(),
+        new HierarcyBuilder().addServerToOutput(getAuthServer()).build());
+  }
+
+  private void authorizeCreateTable(PreCreateTableEvent context)
+      throws InvalidOperationException, MetaException {
+    HierarcyBuilder inputBuilder = new HierarcyBuilder();
+    if (!StringUtils.isEmpty(context.getTable().getSd().getLocation())) {
+      inputBuilder.addUriToOutput(getAuthServer(), context.getTable().getSd()
+          .getLocation());
+    }
+    authorizeMetastoreAccess(HiveOperation.CREATETABLE, inputBuilder.build(),
+        new HierarcyBuilder().addDbToOutput(
+            getAuthServer(), context.getTable().getDbName()).build());
+  }
+
+  private void authorizeDropTable(PreDropTableEvent context)
+      throws InvalidOperationException, MetaException {
+    authorizeMetastoreAccess(
+        HiveOperation.DROPTABLE,
+        new HierarcyBuilder().build(),
+        new HierarcyBuilder().addDbToOutput(getAuthServer(),
+            context.getTable().getDbName()).build());
+  }
+
+  private void authorizeAlterTable(PreAlterTableEvent context)
+      throws InvalidOperationException, MetaException {
+
+    HierarcyBuilder inputBuilder = new HierarcyBuilder();
+    // if the operation requires location change, then add URI privilege check
+    if (context.getOldTable().getSd().getLocation()
+        .compareTo(context.getNewTable().getSd().getLocation()) != 0) {
+      inputBuilder.addUriToOutput(getAuthServer(), context.getNewTable()
+          .getSd().getLocation());
+    }
+    authorizeMetastoreAccess(
+        HiveOperation.ALTERTABLE_ADDCOLS, inputBuilder.build(),
+        new HierarcyBuilder().addDbToOutput(getAuthServer(),
+            context.getOldTable().getDbName()).build());
+  }
+
+  private void authorizeAddPartition(PreAddPartitionEvent context)
+      throws InvalidOperationException, MetaException {
+    // check if we need to validate URI permissions when storage location is
+    // non-default
+    HierarcyBuilder inputBuilder = new HierarcyBuilder();
+    if (!StringUtils.isEmpty(context.getPartition().getSd().getLocation())) {
+      inputBuilder.addUriToOutput(getAuthServer(), context.getPartition()
+          .getSd().getLocation());
+    }
+    authorizeMetastoreAccess(HiveOperation.ALTERTABLE_ADDPARTS,
+        inputBuilder.build(),
+        new HierarcyBuilder().addDbToOutput(getAuthServer(),
+            context.getPartition().getDbName()).build());
+  }
+
+  private void authorizeDropPartition(PreDropPartitionEvent context)
+      throws InvalidOperationException, MetaException {
+    authorizeMetastoreAccess(
+        HiveOperation.ALTERTABLE_DROPPARTS,
+        new HierarcyBuilder().build(),
+        new HierarcyBuilder().addDbToOutput(getAuthServer(),
+            context.getPartition().getDbName()).build());
+  }
+
+  private void authorizeAlterPartition(PreAlterPartitionEvent context)
+      throws InvalidOperationException, MetaException {
+    authorizeMetastoreAccess(
+        HiveOperation.ALTERPARTITION_LOCATION,
+        new HierarcyBuilder().build(),
+        new HierarcyBuilder()
+            .addDbToOutput(getAuthServer(),
+            context.getNewPartition().getDbName()).build());
+  }
+
+  private InvalidOperationException invalidOperationException(Exception e) {
+    InvalidOperationException ex = new InvalidOperationException(e.getMessage());
+    ex.initCause(e.getCause());
+    return ex;
+  }
+
+  /**
+   * Assemble the required and requested privileges, then validate them using
+   * the Hive binding's authorization provider.
+   * @param hiveOp
+   * @param inputHierarchy
+   * @param outputHierarchy
+   * @throws InvalidOperationException
+   */
+  private void authorizeMetastoreAccess(HiveOperation hiveOp,
+      List<List<DBModelAuthorizable>> inputHierarchy,
+      List<List<DBModelAuthorizable>> outputHierarchy)
+      throws InvalidOperationException {
+    try {
+      HiveAuthzBinding hiveAuthzBinding = getHiveAuthzBinding();
+      String userName = ShimLoader.getHadoopShims().getUGIForConf(hiveConf)
+          .getShortUserName();
+      if (needsAuthorization(userName)) {
+        hiveAuthzBinding.authorize(hiveOp, HiveAuthzPrivilegesMap
+            .getHiveAuthzPrivileges(hiveOp), new Subject(userName),
+        inputHierarchy, outputHierarchy);
+      }
+    } catch (AuthorizationException e1) {
+      throw invalidOperationException(e1);
+    } catch (LoginException e1) {
+      throw invalidOperationException(e1);
+    } catch (IOException e1) {
+      throw invalidOperationException(e1);
+    } catch (Exception e) {
+      throw invalidOperationException(e);
+    }
+
+  }
+
+  public Server getAuthServer() {
+    return authServer;
+  }
+
+  private boolean needsAuthorization(String userName) {
+    return !serviceUsers.contains(userName);
+  }
+
+  private static Set<String> toTrimedLower(Set<String> s) {
+    Set<String> result = Sets.newHashSet();
+    for (String v : s) {
+      result.add(v.trim().toLowerCase());
+    }
+    return result;
+  }
+
+  private HiveAuthzBinding getHiveAuthzBinding() throws Exception {
+    if (hiveAuthzBinding == null) {
+      hiveAuthzBinding = new HiveAuthzBinding(hiveConf, authzConf);
+    }
+    return hiveAuthzBinding;
+  }
+
+}

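A sketch of the configuration this plugin expects. The constants are the ones
referenced by the HiveServerFactory changes later in this commit; the HiveConf
usage and the sentry-site.xml path are illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
    import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars;

    HiveConf conf = new HiveConf();
    // Fire the Sentry binding before each metastore write event.
    conf.set(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname,
        "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
    // URL of the sentry-site.xml that the binding's constructor loads.
    conf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL,
        "file:///etc/hive/conf/sentry-site.xml");
    // Trusted service users that skip the authorization check.
    conf.set(AuthzConfVars.AUTHZ_METASTORE_SERVICE_USERS.getVar(),
        "hive,impala");
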
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
index f9928df..33e51de 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
@@ -163,7 +163,7 @@ public class SentryService implements Callable {
       }
     } catch (Exception t) {
       LOGGER.error("Error starting server", t);
-      throw t;
+      throw new Exception("Error starting server", t);
     }finally {
       status = Status.NOT_STARTED;
       if (loginContext != null) {

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
index 66d6eef..61bdfed 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/service/thrift/SentryServiceIntegrationBase.java
@@ -18,7 +18,6 @@
 
 package org.apache.sentry.service.thrift;
 import java.io.File;
-import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
 import java.util.HashSet;
 import java.util.Set;

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/PolicyProviderForTest.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/PolicyProviderForTest.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/PolicyProviderForTest.java
index 8e8db72..47ce66d 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/PolicyProviderForTest.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/PolicyProviderForTest.java
@@ -51,7 +51,7 @@ public class PolicyProviderForTest extends PolicyFile {
     return sentryClient;
   }
 
-  protected static void setSentryClient(
+  public static void setSentryClient(
       SentryPolicyServiceClient newSentryClient) {
     sentryClient = newSentryClient;
   }

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java
index a8ce2a2..0d3b820 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestDbSentryOnFailureHookLoading.java
@@ -17,14 +17,9 @@
 
 package org.apache.sentry.tests.e2e.dbprovider;
 
-import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.sentry.provider.db.SentryAccessDeniedException;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
-import java.io.FileOutputStream;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -35,6 +30,7 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
+import org.apache.sentry.provider.db.SentryAccessDeniedException;
 import org.apache.sentry.provider.file.PolicyFile;
 import org.apache.sentry.tests.e2e.hive.DummySentryOnFailureHook;
 import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
@@ -43,8 +39,6 @@ import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.io.Resources;
-
 public class TestDbSentryOnFailureHookLoading extends AbstractTestWithDbProvider {
 
   private PolicyFile policyFile;
@@ -67,8 +61,8 @@ public class TestDbSentryOnFailureHookLoading extends AbstractTestWithDbProvider
     String hiveServer2Type = System
         .getProperty(HiveServerFactory.HIVESERVER2_TYPE);
     if(hiveServer2Type != null) {
-      Assume.assumeTrue(HiveServerFactory.HiveServer2Type.valueOf(hiveServer2Type.trim()) ==
-              HiveServerFactory.HiveServer2Type.InternalHiveServer2);
+      Assume.assumeTrue(HiveServerFactory.isInternalServer(
+          HiveServerFactory.HiveServer2Type.valueOf(hiveServer2Type.trim())));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
index b6bb09c..952b021 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
@@ -93,6 +93,7 @@ public abstract class AbstractTestWithStaticConfiguration {
 
   protected static boolean policy_on_hdfs = false;
   protected static boolean useSentryService = false;
+  protected static String testServerType = null;
 
   protected static File baseDir;
   protected static File logDir;
@@ -158,12 +159,14 @@ public abstract class AbstractTestWithStaticConfiguration {
   }
 
   @BeforeClass
-  public static void setupTestStaticConfiguration()
-      throws Exception {
+  public static void setupTestStaticConfiguration() throws Exception {
+    properties = Maps.newHashMap();
     if(!policy_on_hdfs) {
       policy_on_hdfs = new Boolean(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
     }
-    properties = Maps.newHashMap();
+    if (testServerType != null) {
+      properties.put("sentry.e2etest.hiveServer2Type", testServerType);
+    }
     baseDir = Files.createTempDir();
     LOGGER.info("BaseDir = " + baseDir);
     logDir = assertCreateDir(new File(baseDir, "log"));
@@ -176,11 +179,14 @@ public abstract class AbstractTestWithStaticConfiguration {
     fileSystem = dfs.getFileSystem();
 
     String policyURI;
+    PolicyFile policyFile = PolicyFile.setAdminOnServer1(ADMIN1);
+    policyFile.write(policyFileLocation);
     if (policy_on_hdfs) {
       String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString();
       LOGGER.error("dfsUri " + dfsUri);
       policyURI = dfsUri + System.getProperty("sentry.e2etest.hive.policy.location", "/user/hive/sentry");
       policyURI += "/" + HiveServerFactory.AUTHZ_PROVIDER_FILENAME;
+      dfs.writePolicyFile(policyFileLocation);
     } else {
       policyURI = policyFileLocation.getPath();
     }
@@ -201,7 +207,6 @@ public abstract class AbstractTestWithStaticConfiguration {
   }
 
   private static void setupSentryService() throws Exception {
-    properties = Maps.newHashMap();
     sentryConf = new Configuration(false);
     PolicyFile policyFile = new PolicyFile();
 

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
index d8f5256..83fe397 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/Context.java
@@ -28,6 +28,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -37,7 +38,11 @@ import java.util.Set;
 import junit.framework.Assert;
 
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.sentry.provider.db.SentryAccessDeniedException;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -55,6 +60,7 @@ public class Context {
   public static final String AUTHZ_EXCEPTION_SQL_STATE = "42000";
   public static final String AUTHZ_LINK_FAILURE_SQL_STATE = "08S01";
   public static final String AUTHZ_EXCEPTION_ERROR_MSG = "No valid privileges";
+  private static final String METASTORE_AUTH_ERROR_MSG = "does not have privileges";
 
   private final HiveServer hiveServer;
   private final FileSystem fileSystem;
@@ -232,6 +238,19 @@ public class Context {
     return hiveServer.getURL();
   }
 
+  // TODO: Handle kerberos login
+  public HiveMetaStoreClient getMetaStoreClient(String userName) throws Exception {
+    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser(userName);
+    HiveMetaStoreClient client = (HiveMetaStoreClient)ShimLoader.getHadoopShims()
+        .doAs(clientUgi, new PrivilegedExceptionAction<Object> () {
+          @Override
+          public HiveMetaStoreClient run() throws Exception {
+            return new HiveMetaStoreClient(new HiveConf());
+          }
+        });
+    return client;
+  }
+
   /**
    * Execute "set x" and extract value from key=val format result Verify the
    * extracted value
@@ -250,4 +269,9 @@ public class Context {
     assertEquals("Conf value should be set by execute()", expectedVal,
         resultValues[1]);
   }
+
+  public static void verifyMetastoreAuthException(MetaException e)
+      throws Exception {
+    assertTrue(e.getMessage().contains(METASTORE_AUTH_ERROR_MSG));
+  }
 }

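A usage sketch for the new helpers, mirroring the metastore end-to-end tests
later in this commit (the user, database, and table names are the test's own
examples):

    import static org.junit.Assert.fail;

    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    // Talk to the metastore as a specific unprivileged test user and verify
    // that a denied write surfaces as a Sentry-issued MetaException.
    HiveMetaStoreClient client = context.getMetaStoreClient("user2_1");
    try {
      client.dropTable("db_1", "tab1");
      fail("drop table should have failed for non-privilege user");
    } catch (MetaException e) {
      Context.verifyMetastoreAuthException(e);
    }
    client.close();
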
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java
index cae270b..ad27238 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestSentryOnFailureHookLoading.java
@@ -74,8 +74,8 @@ public class TestSentryOnFailureHookLoading extends AbstractTestWithHiveServer {
     String hiveServer2Type = System.getProperty(
         HiveServerFactory.HIVESERVER2_TYPE);
     if (hiveServer2Type != null &&
-        HiveServerFactory.HiveServer2Type.valueOf(hiveServer2Type.trim()) !=
-        HiveServerFactory.HiveServer2Type.InternalHiveServer2) {
+        !HiveServerFactory.isInternalServer(HiveServerFactory.HiveServer2Type
+            .valueOf(hiveServer2Type.trim()))) {
       return;
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java
index 184c066..a96a1ce 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java
@@ -16,21 +16,49 @@
  */
 package org.apache.sentry.tests.e2e.hive.fs;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
 import junit.framework.Assert;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.GroupMappingServiceProvider;
 
-import java.io.File;
-import java.util.concurrent.TimeoutException;
+import com.google.common.collect.Lists;
 
 public class MiniDFS extends AbstractDFS {
+  // mock user group mapping that maps user to same group
+  public static class PseudoGroupMappingService implements
+      GroupMappingServiceProvider {
+
+    @Override
+    public List<String> getGroups(String user) {
+      return Lists.newArrayList(user, System.getProperty("user.name"));
+    }
+
+    @Override
+    public void cacheGroupsRefresh() throws IOException {
+      // no-op
+    }
+
+    @Override
+    public void cacheGroupsAdd(List<String> groups) throws IOException {
+      // no-op
+    }
+  }
+
   private static MiniDFSCluster dfsCluster;
 
   MiniDFS(File baseDir) throws Exception {
     Configuration conf = new Configuration();
     File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
     conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
+    conf.set("hadoop.security.group.mapping",
+        MiniDFS.PseudoGroupMappingService.class.getName());
+    Configuration.addDefaultResource("test.xml");
     dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fileSystem = dfsCluster.getFileSystem();
     String policyDir = System.getProperty("sentry.e2etest.hive.policy.location", "/user/hive/sentry");

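The mock mapping resolves every user to a same-named group plus the group of
the JVM owner, so group names in the test policy files line up with user
names. A hedged sketch of the observable effect, assuming the configuration
above is in force:

    import org.apache.hadoop.security.UserGroupInformation;

    // Group lookup goes through the configured provider, here
    // MiniDFS.PseudoGroupMappingService.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user1");
    String[] groups = ugi.getGroupNames(); // e.g. ["user1", "<jvm user>"]
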
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
index 19ff6cf..39162df 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
@@ -25,10 +25,13 @@ import java.net.URL;
 import java.util.Map;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
+import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars;
 import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
 import org.fest.reflect.core.Reflection;
 import org.junit.Assert;
@@ -58,7 +61,11 @@ public class HiveServerFactory {
   public static final String HADOOPBIN = ConfVars.HADOOPBIN.toString();
   public static final String DEFAULT_AUTHZ_SERVER_NAME = "server1";
   public static final String HIVESERVER2_IMPERSONATION = "hive.server2.enable.doAs";
-
+  public static final String METASTORE_URI = HiveConf.ConfVars.METASTOREURIS.varname;
+  public static final String METASTORE_HOOK = HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname;
+  public static final String METASTORE_SETUGI = HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname;
+  public static final String METASTORE_BYPASS = AuthzConfVars.AUTHZ_METASTORE_SERVICE_USERS.getVar();
+  public static final String METASTORE_CLIENT_TIMEOUT = HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname;
 
   static {
     try {
@@ -71,8 +78,7 @@ public class HiveServerFactory {
 
   public static HiveServer create(Map<String, String> properties,
       File baseDir, File confDir, File logDir, String policyFile,
-      FileSystem fileSystem)
-          throws Exception {
+      FileSystem fileSystem) throws Exception {
     String type = properties.get(HIVESERVER2_TYPE);
     if(type == null) {
       type = System.getProperty(HIVESERVER2_TYPE);
@@ -98,8 +104,10 @@ public class HiveServerFactory {
         String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString();
         LOGGER.error("dfsUri " + dfsUri);
         properties.put(WAREHOUSE_DIR, dfsUri + "/data");
+        fileSystem.mkdirs(new Path("/data/"), new FsPermission((short) 0777));
       } else {
         properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
+        fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777));
       }
     }
     if(!properties.containsKey(METASTORE_CONNECTION_URL)) {
@@ -129,6 +137,24 @@ public class HiveServerFactory {
     if(!properties.containsKey(HADOOPBIN)) {
       properties.put(HADOOPBIN, "./target/hadoop/bin/hadoop");
     }
+    if (!properties.containsKey(METASTORE_URI)) {
+      if (HiveServer2Type.InternalMetastore.equals(type)) {
+        properties.put(METASTORE_URI,
+          "thrift://localhost:" + String.valueOf(findPort()));
+      }
+    }
+    if (!properties.containsKey(METASTORE_HOOK)) {
+      properties.put(METASTORE_HOOK,
+          "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
+    }
+    if (!properties.containsKey(METASTORE_BYPASS)) {
+      properties.put(METASTORE_BYPASS,
+          "hive,impala," + System.getProperty("user.name", ""));
+    }
+    properties.put(METASTORE_SETUGI, "true");
+    properties.put(METASTORE_CLIENT_TIMEOUT, "100");
+    properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");
+
     properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
     String hadoopBinPath = properties.get(HADOOPBIN);
     Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
@@ -161,7 +187,8 @@ public class HiveServerFactory {
     authzConf.writeXml(out);
     out.close();
     // points hive-site.xml at access-site.xml
-    hiveConf.set(HiveAuthzConf.HIVE_ACCESS_CONF_URL, accessSite.toURI().toURL().toExternalForm());
+    hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, accessSite.toURI().toURL()
+        .toExternalForm());
     if(!properties.containsKey(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname)) {
       hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
         "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
@@ -183,6 +210,9 @@ public class HiveServerFactory {
     case InternalHiveServer2:
       LOGGER.info("Creating InternalHiveServer");
       return new InternalHiveServer(hiveConf);
+    case InternalMetastore:
+      LOGGER.info("Creating InternalMetastoreServer");
+      return new InternalMetastoreServer(hiveConf);
     case ExternalHiveServer2:
       LOGGER.info("Creating ExternalHiveServer");
       return new ExternalHiveServer(hiveConf, confDir, logDir);
@@ -201,8 +231,14 @@ public class HiveServerFactory {
   public static enum HiveServer2Type {
     EmbeddedHiveServer2,           // Embedded HS2, directly executed by JDBC, without thrift
     InternalHiveServer2,        // Start a thrift HS2 in the same process
+    InternalMetastore, // Start a thrift metastore in the same process
     ExternalHiveServer2,   // start a remote thrift HS2
     UnmanagedHiveServer2      // Use a remote thrift HS2 already running
     ;
   }
+
+  public static boolean isInternalServer(HiveServer2Type hs2Type) {
+    return (HiveServer2Type.InternalHiveServer2.equals(hs2Type) || HiveServer2Type.InternalMetastore
+        .equals(hs2Type));
+  }
 }

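A sketch of how a test selects the new server type through the factory. The
property name and enum come from this file; the fixtures (baseDir, confDir,
logDir, policy file path, fileSystem) are assumed to come from the test
harness:

    import java.util.Map;
    import com.google.common.collect.Maps;

    Map<String, String> properties = Maps.newHashMap();
    properties.put(HiveServerFactory.HIVESERVER2_TYPE,
        HiveServerFactory.HiveServer2Type.InternalMetastore.name());
    // The factory picks a free port for hive.metastore.uris, installs
    // MetastoreAuthzBinding as a pre-event listener, and returns an
    // InternalMetastoreServer.
    HiveServer server = HiveServerFactory.create(properties, baseDir,
        confDir, logDir, policyFile, fileSystem);
    server.start();
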
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java
new file mode 100644
index 0000000..b1f404f
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/InternalMetastoreServer.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.tests.e2e.hive.hiveserver;
+
+import java.net.URI;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.fest.reflect.core.Reflection;
+
+public class InternalMetastoreServer extends AbstractHiveServer {
+  private final HiveConf conf;
+  private ExecutorService metaStoreExecutor = Executors
+      .newSingleThreadExecutor();
+
+  public InternalMetastoreServer(HiveConf conf) throws Exception {
+    super(conf, getMetastoreHostname(conf), getMetastorePort(conf));
+    // Fix for ACCESS-148. Resets a static field
+    // so the default database is created even
+    // though it has been created before in this JVM
+    Reflection.staticField("createDefaultDB").ofType(boolean.class)
+        .in(HiveMetaStore.HMSHandler.class).set(false);
+    this.conf = conf;
+  }
+
+  @Override
+  public void start() throws Exception {
+    startMetastore();
+  }
+
+  @Override
+  public void shutdown() throws Exception {
+    metaStoreExecutor.shutdown();
+  }
+
+  // async metastore startup since Hive doesn't have that option
+  private void startMetastore() throws Exception {
+    Callable<Void> metastoreService = new Callable<Void>() {
+      public Void call() throws Exception {
+        try {
+          HiveMetaStore.startMetaStore(getMetastorePort(conf),
+              ShimLoader.getHadoopThriftAuthBridge(), conf);
+        } catch (Throwable e) {
+          throw new Exception("Error starting metastore", e);
+        }
+        return null;
+      }
+    };
+    metaStoreExecutor.submit(metastoreService);
+  }
+
+  private static String getMetastoreHostname(Configuration conf)
+      throws Exception {
+    return new URI(conf.get(HiveConf.ConfVars.METASTOREURIS.varname)).getHost();
+  }
+
+  private static int getMetastorePort(Configuration conf) throws Exception {
+    return new URI(conf.get(HiveConf.ConfVars.METASTOREURIS.varname)).getPort();
+
+  }
+}

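A usage sketch, assuming the conf already carries a thrift URI; the server
derives its host and port from hive.metastore.uris, and the port value here is
illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;

    HiveConf conf = new HiveConf();
    conf.set(HiveConf.ConfVars.METASTOREURIS.varname,
        "thrift://localhost:9083");
    InternalMetastoreServer metastore = new InternalMetastoreServer(conf);
    metastore.start(); // returns immediately; startup runs on the executor
    // ... exercise the metastore at thrift://localhost:9083 ...
    metastore.shutdown();
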
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
new file mode 100644
index 0000000..2e70b14
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.sentry.tests.e2e.dbprovider.PolicyProviderForTest;
+import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration;
+import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.HiveServer2Type;
+import org.junit.BeforeClass;
+
+public abstract class AbstractMetastoreTestWithStaticConfiguration extends
+    AbstractTestWithStaticConfiguration {
+
+  @BeforeClass
+  public static void setupTestStaticConfiguration() throws Exception {
+    useSentryService = true;
+    testServerType = HiveServer2Type.InternalMetastore.name();
+    AbstractTestWithStaticConfiguration.setupTestStaticConfiguration();
+    PolicyProviderForTest.setSentryClient(AbstractTestWithStaticConfiguration
+        .getSentryClient());
+  }
+
+  /**
+   * create a metastore table using the given attributes
+   * @param client
+   * @param dbName
+   * @param tabName
+   * @param cols
+   * @return
+   * @throws Exception
+   */
+  public Table createMetastoreTable(HiveMetaStoreClient client, String dbName,
+      String tabName, List<FieldSchema> cols) throws Exception {
+
+    Table tbl = makeMetastoreTableObject(client, dbName, tabName, cols);
+    client.createTable(tbl);
+    return tbl;
+  }
+
+  public Table createMetastoreTableWithLocation(HiveMetaStoreClient client,
+      String dbName, String tabName, List<FieldSchema> cols, String location)
+      throws Exception {
+    Table tbl = makeMetastoreTableObject(client, dbName, tabName, cols);
+    tbl.getSd().setLocation(location);
+    client.createTable(tbl);
+    return tbl;
+
+  }
+
+  public Table createMetastoreTableWithPartition(HiveMetaStoreClient client,
+      String dbName, String tabName, List<FieldSchema> cols,
+      List<FieldSchema> partionVals) throws Exception {
+    Table tbl = makeMetastoreTableObject(client, dbName, tabName, cols);
+    tbl.setPartitionKeys(partionVals);
+    client.createTable(tbl);
+    return client.getTable(dbName, tabName);
+  }
+
+  public void addPartition(HiveMetaStoreClient client, String dbName,
+      String tblName, List<String> ptnVals, Table tbl) throws Exception {
+    Partition part = makeMetastorePartitionObject(dbName, tblName, ptnVals, tbl);
+    client.add_partition(part);
+  }
+
+  public void addPartitionWithLocation(HiveMetaStoreClient client,
+      String dbName, String tblName, List<String> ptnVals, Table tbl,
+      String location) throws Exception {
+    Partition part = makeMetastorePartitionObject(dbName, tblName, ptnVals,
+        tbl, location);
+    client.add_partition(part);
+  }
+
+  public Table makeMetastoreTableObject(HiveMetaStoreClient client,
+      String dbName, String tabName, List<FieldSchema> cols) throws Exception {
+    Table tbl = new Table();
+    tbl.setDbName(dbName);
+    tbl.setTableName(tabName);
+    StorageDescriptor sd = new StorageDescriptor();
+    tbl.setSd(sd);
+    tbl.setParameters(new HashMap<String, String>());
+    sd.setCols(cols);
+    sd.setCompressed(false);
+    sd.setParameters(new HashMap<String, String>());
+    sd.setSerdeInfo(new SerDeInfo());
+    sd.getSerdeInfo().setName(tbl.getTableName());
+    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+    sd.getSerdeInfo().getParameters()
+        .put(serdeConstants.SERIALIZATION_FORMAT, "1");
+    sd.setSortCols(new ArrayList<Order>());
+    return tbl;
+  }
+
+  public Partition makeMetastorePartitionObject(String dbName, String tblName,
+      List<String> ptnVals, Table tbl, String partitionLocation) {
+    Partition part = makeMetastoreBasePartitionObject(dbName, tblName, ptnVals,
+        tbl);
+    part.getSd().setLocation(partitionLocation);
+    return part;
+  }
+
+  public Partition makeMetastorePartitionObject(String dbName, String tblName,
+      List<String> ptnVals, Table tbl) {
+    Partition part = makeMetastoreBasePartitionObject(dbName, tblName, ptnVals,
+        tbl);
+    part.getSd().setLocation("");
+    return part;
+  }
+
+  private Partition makeMetastoreBasePartitionObject(String dbName,
+      String tblName, List<String> ptnVals, Table tbl) {
+    Partition part4 = new Partition();
+    part4.setDbName(dbName);
+    part4.setTableName(tblName);
+    part4.setValues(ptnVals);
+    part4.setParameters(new HashMap<String, String>());
+    part4.setSd(tbl.getSd().deepCopy());
+    part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
+    part4.setParameters(new HashMap<String, String>());
+    return part4;
+  }
+
+  public void createMetastoreDB(HiveMetaStoreClient client, String dbName)
+      throws Exception {
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+  }
+
+}

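A short usage sketch combining these helpers, mirroring the setup of the
end-to-end test that follows (context and ADMIN1 come from the shared test
base class):

    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import com.google.common.collect.Lists;

    // Create a database and a one-column table through the raw metastore
    // API as the admin test user.
    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
    createMetastoreDB(client, "db_1");
    createMetastoreTable(client, "db_1", "tab1",
        Lists.newArrayList(new FieldSchema("col1", "int", "")));
    client.close();
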
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
new file mode 100644
index 0000000..1aed84d
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
@@ -0,0 +1,387 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.tests.e2e.metastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.sentry.tests.e2e.dbprovider.PolicyProviderForTest;
+import org.apache.sentry.tests.e2e.hive.Context;
+import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
+import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestMetastoreEndToEnd extends
+    AbstractMetastoreTestWithStaticConfiguration {
+
+  private PolicyProviderForTest policyFile;
+  private static final String dbName = "db_1";
+  private static final String db_all_role = "all_db1";
+  private static final String uri_role = "uri_role";
+
+  @Before
+  public void setup() throws Exception {
+    context = createContext();
+    policyFile = PolicyProviderForTest.setAdminOnServer1(ADMINGROUP);
+    policyFile
+        .addRolesToGroup(USERGROUP1, db_all_role)
+        .addRolesToGroup(USERGROUP2, "read_db_role")
+        .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName)
+        .addPermissionsToRole("read_db_role",
+            "server=server1->db=" + dbName + "->table=*->action=SELECT")
+        .setUserGroupMapping(StaticUserGroup.getStaticMapping());
+    writePolicyFile(policyFile);
+
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    client.dropDatabase(dbName, true, true, true);
+    createMetastoreDB(client, dbName);
+    client.close();
+
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (context != null) {
+      context.close();
+    }
+  }
+
+  /**
+   * Set up admin privileges for user ADMIN1 and verify the user can create a DB and tables
+   * @throws Exception
+   */
+  @Test
+  public void testServerPrivileges() throws Exception {
+    String tabName = "tab1";
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    client.dropDatabase(dbName, true, true, true);
+
+    createMetastoreDB(client, dbName);
+    createMetastoreTable(client, dbName, tabName,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    assertEquals(1, client.getTables(dbName, tabName).size());
+    client.dropTable(dbName, tabName);
+    client.dropDatabase(dbName, true, true, true);
+  }
+
+  /**
+   * Verify that a non-admin user cannot create or drop a DB
+   * @throws Exception
+   */
+  @Test
+  public void testNegativeServerPrivileges() throws Exception {
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    try {
+      createMetastoreDB(client, "fooDb");
+      fail("Creat db should have failed for non-admin user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    try {
+      client.dropDatabase(dbName, true, true, true);
+      fail("drop db should have failed for non-admin user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+  }
+
+  /**
+   * Verify that a user with DB permission can create tables in that DB, and
+   * can't create tables in a DB where he doesn't have ALL permissions
+   * @throws Exception
+   */
+  @Test
+  public void testTablePrivileges() throws Exception {
+    String tabName1 = "tab1";
+    String tabName2 = "tab2";
+
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    createMetastoreTable(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    client.close();
+
+    client = context.getMetaStoreClient(USER1_1);
+    createMetastoreTable(client, dbName, tabName2,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    assertEquals(1, client.getTables(dbName, tabName2).size());
+    client.dropTable(dbName, tabName1);
+    client.close();
+
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      createMetastoreTable(client, dbName, "barTab",
+          Lists.newArrayList(new FieldSchema("col1", "int", "")));
+      fail("Create table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+
+    try {
+      client.dropTable(dbName, tabName2);
+      fail("drop table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify alter table privileges
+   * @throws Exception
+   */
+  @Test
+  public void testAlterTablePrivileges() throws Exception {
+    String tabName1 = "tab1";
+
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    createMetastoreTable(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    client.close();
+
+    // verify group1 users with DDL privileges can alter tables in db_1
+    client = context.getMetaStoreClient(USER1_1);
+    Table metaTable2 = client.getTable(dbName, tabName1);
+    metaTable2.getSd().setCols(
+        Lists.newArrayList(new FieldSchema("col2", "double", "")));
+    client.alter_table(dbName, tabName1, metaTable2);
+    Table metaTable3 = client.getTable(dbName, tabName1);
+    assertEquals(metaTable2, metaTable3);
+
+    // verify group2 users can't alter tables in db_1
+    client = context.getMetaStoreClient(USER2_1);
+    metaTable2 = client.getTable(dbName, tabName1);
+    metaTable2.getSd().setCols(
+        Lists.newArrayList(new FieldSchema("col3", "string", "")));
+    try {
+      client.alter_table(dbName, tabName1, metaTable2);
+      fail("alter table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify add partition privileges
+   * @throws Exception
+   */
+  @Test
+  public void testAddPartitionPrivileges() throws Exception {
+    String tabName = "tab1";
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+    ArrayList<String> partVals2 = Lists.newArrayList("part2");
+    ArrayList<String> partVals3 = Lists.newArrayList("part2");
+
+    // user with ALL on DB should be able to add partition
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    Table tbl1 = createMetastoreTableWithPartition(client, dbName,
+        tabName, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    assertEquals(1, client.getTables(dbName, tabName).size());
+    addPartition(client, dbName, tabName, partVals1, tbl1);
+    addPartition(client, dbName, tabName, partVals2, tbl1);
+    client.close();
+
+    // user without ALL on DB should NOT be able to add partition
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      addPartition(client, dbName, tabName, partVals3, tbl1);
+      fail("Add partition should have failed for non-admin user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+
+    // user with ALL on DB should be able to drop partition
+    client = context.getMetaStoreClient(USER1_1);
+    tbl1 = client.getTable(dbName, tabName);
+    client.dropPartition(dbName, tabName, partVals1, true);
+    client.close();
+
+    // user without ALL on DB should NOT be able to drop partition
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      client.dropPartition(dbName, tabName, partVals2, true);
+      fail("Drop partition should have failed for user without ALL on DB");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify URI privileges for create table and alter table with a specific location
+   * @throws Exception
+   */
+  @Test
+  public void testUriTablePrivileges() throws Exception {
+    String tabName1 = "tab1";
+    String tabName2 = "tab2";
+    String newPath1 = "fooTab1";
+    String newPath2 = "fooTab2";
+
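+    // Build two locations under the warehouse directory to use as table paths.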
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath1;
+    String tabDir2 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath2;
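+    // On top of the setup grants, give group1 URI privileges on both
+    // locations and give group2 DB-level ALL but no URI access.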
+    policyFile.addRolesToGroup(USERGROUP1, uri_role)
+        .addRolesToGroup(USERGROUP2, db_all_role)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir2);
+    writePolicyFile(policyFile);
+
+    // USER2_1 (ALL on DB) creates a table at the default location
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER2_1);
+    createMetastoreTable(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    client.close();
+
+    // user with URI privileges should be able to create table with that specific location
+    client = context.getMetaStoreClient(USER1_1);
+    createMetastoreTableWithLocation(client, dbName, tabName2,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")), tabDir2);
+    client.close();
+
+    // user without URI privileges should NOT be able to create table with that specific location
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      createMetastoreTableWithLocation(client, dbName, tabName2,
+          Lists.newArrayList(new FieldSchema("col1", "int", "")), tabDir2);
+      fail("Create table with location should fail without URI privilege");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+
+    // user with URI privileges should be able to alter table to set that specific location
+    client = context.getMetaStoreClient(USER1_1);
+    Table metaTable1 = client.getTable(dbName, tabName1);
+    metaTable1.getSd().setLocation(tabDir1);
+    client.alter_table(dbName, tabName1, metaTable1);
+    client.close();
+
+    // user without URI privileges should NOT be able to alter table to set
+    // that specific location
+    client = context.getMetaStoreClient(USER2_1);
+    Table metaTable2 = client.getTable(dbName, tabName2);
+    metaTable2.getSd().setLocation(tabDir1);
+    try {
+      client.alter_table(dbName, tabName2, metaTable2);
+      fail("Alter table with location should fail without URI privilege");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify URI privileges for add partition with a specific location
+   * @throws Exception
+   */
+  @Test
+  public void testUriPartitionPrivileges() throws Exception {
+    String tabName1 = "tab1";
+    String newPath1 = "fooTab1";
+    String newPath2 = "fooTab2";
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+    ArrayList<String> partVals2 = Lists.newArrayList("part2");
+    ArrayList<String> partVals3 = Lists.newArrayList("part2");
+
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath1;
+    String tabDir2 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath2;
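+    // As in the table test: group1 gets URI privileges on both locations,
+    // group2 gets DB-level ALL but no URI access.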
+    policyFile.addRolesToGroup(USERGROUP1, uri_role)
+        .addRolesToGroup(USERGROUP2, db_all_role)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir2);
+    writePolicyFile(policyFile);
+
+    // user with URI privileges should be able to add partitions, both with and without a specific location
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    Table tbl1 = createMetastoreTableWithPartition(client, dbName,
+        tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    addPartition(client, dbName, tabName1, partVals1, tbl1);
+    addPartitionWithLocation(client, dbName, tabName1, partVals2, tbl1,
+        tabDir1);
+    client.close();
+
+    // user without URI privileges should NOT be able to add a partition at
+    // that specific location
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      tbl1 = client.getTable(dbName, tabName1);
+      addPartitionWithLocation(client, dbName, tabName1, partVals3,
+          tbl1, tabDir2);
+      fail("Add partition with location should have failed without URI privilege");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify alter partition privileges.
+   * TODO: There seems to be an inconsistency with alter partition: it is only
+   * allowed with the SERVER privilege. If we allow add/drop partition with DB
+   * level privilege, then alter partition should be at the same level.
+   * @throws Exception
+   */
+  @Test
+  public void testAlterSetLocationPrivileges() throws Exception {
+    String tabName1 = "tab1";
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+
+    // user with SERVER privileges should be able to alter partition
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    Table tbl1 = createMetastoreTableWithPartition(client, dbName,
+        tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    addPartition(client, dbName, tabName1, partVals1, tbl1);
+    Partition newPartition = client.getPartition(dbName, tabName1, partVals1);
+    client.alter_partition(dbName, tabName1, newPartition);
+    client.close();
+
+    // user without SERVER privileges should NOT be able to alter partition to
+    // set that specific location
+    client = context.getMetaStoreClient(USER1_1);
+    tbl1 = client.getTable(dbName, tabName1);
+    newPartition = client.getPartition(dbName, tabName1, partVals1);
+    try {
+      client.alter_partition(dbName, tabName1, newPartition);
+      fail("Alter partition should have failed without SERVER privilege");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c04138d3/sentry-tests/sentry-tests-hive/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/resources/core-site.xml b/sentry-tests/sentry-tests-hive/src/test/resources/core-site.xml
new file mode 100644
index 0000000..676dc12
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/resources/core-site.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+  <property>
+    <name>fs.permissions</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>000</value>
+  </property>
+  <property>
+    <name>hadoop.security.group.mapping</name>
+    <value>org.apache.sentry.tests.e2e.hive.fs.MiniDFS$PseudoGroupMappingService</value>
+  </property>
+</configuration>