Posted to commits@sentry.apache.org by ls...@apache.org on 2014/11/13 08:43:38 UTC

[1/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Repository: incubator-sentry
Updated Branches:
  refs/heads/master 49e6086c2 -> 2e509e4bc


http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/StaticUserGroup.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/StaticUserGroup.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/StaticUserGroup.java
index 66f088f..8306e95 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/StaticUserGroup.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/StaticUserGroup.java
@@ -27,6 +27,7 @@ public class StaticUserGroup {
       USER2_1 = "user2_1",
       USER3_1 = "user3_1",
       USER4_1 = "user4_1",
+      HIVE = "hive",
       USERGROUP1 = "user_group1",
       USERGROUP2 = "user_group2",
       USERGROUP3 = "user_group3",
@@ -39,6 +40,7 @@ public class StaticUserGroup {
     ADMINGROUP = System.getProperty("sentry.e2etest.admin.group", "admin");
     staticMapping = new HashMap<String, String>();
     staticMapping.put(ADMIN1, ADMINGROUP);
+    staticMapping.put(HIVE, HIVE);
     staticMapping.put(USER1_1, USERGROUP1);
     staticMapping.put(USER1_2, USERGROUP1);
     staticMapping.put(USER2_1, USERGROUP2);

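For reference, the new entry gives the hive service user a group of the same
name, alongside the existing user-to-group pairs. A minimal self-contained
sketch of the resulting lookup (the literal strings mirror the constants in
the hunk above):

    import java.util.HashMap;
    import java.util.Map;

    public class StaticMappingSketch {
      public static void main(String[] args) {
        // Same shape as StaticUserGroup.staticMapping: user name -> group name.
        Map<String, String> staticMapping = new HashMap<String, String>();
        staticMapping.put("hive", "hive");           // the entry added above
        staticMapping.put("user1_1", "user_group1"); // existing pattern
        // The hive user resolves to a group also named "hive".
        System.out.println(staticMapping.get("hive")); // prints: hive
      }
    }
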
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
index c9a414e..d4be70a 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
@@ -40,7 +40,9 @@ import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.pig.PigServer;
+import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
 import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.service.thrift.SentryServiceClientFactory;
 import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration;
 import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.HiveServer2Type;
 import org.junit.After;

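The two new imports let the metastore tests talk to the Sentry service
directly. A minimal sketch of the client-creation pattern, mirroring the
commented-out test further down (sentryConf is assumed to be the
Configuration that reaches the test Sentry service, and close() is assumed
to be the client's cleanup method):

    // Sketch only, assuming the imports added above plus the
    // SentryAuthzUpdate type referenced by the commented-out test below.
    private void fetchAllUpdates(org.apache.hadoop.conf.Configuration sentryConf)
        throws Exception {
      SentryPolicyServiceClient sentryClient =
          new SentryServiceClientFactory().create(sentryConf);
      try {
        // Every path/permission update recorded since sequence number 0.
        SentryAuthzUpdate allUpdates = sentryClient.getAllUpdatesFrom(0, 0);
      } finally {
        sentryClient.close(); // assumed cleanup; releases the underlying transport
      }
    }
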
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
index 8ce78bc..90428cb 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
@@ -105,19 +105,42 @@ public class TestMetastoreEndToEnd extends
    * Setup admin privileges for user ADMIN1 verify user can create DB and tables
    * @throws Exception
    */
-  @Test
-  public void testServerPrivileges() throws Exception {
-    String tabName = "tab1";
-    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
-    client.dropDatabase(dbName, true, true, true);
-
-    createMetastoreDB(client, dbName);
-    createMetastoreTable(client, dbName, tabName,
-        Lists.newArrayList(new FieldSchema("col1", "int", "")));
-    assertEquals(1, client.getTables(dbName, tabName).size());
-    client.dropTable(dbName, tabName);
-    client.dropDatabase(dbName, true, true, true);
-  }
+//  @Test
+//  public void testServerPrivileges() throws Exception {
+//    String tabName = "tab1";
+//    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+//    client.dropDatabase(dbName, true, true, true);
+//
+//    createMetastoreDB(client, dbName);
+//    createMetastoreTable(client, dbName, tabName,
+//        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+//    assertEquals(1, client.getTables(dbName, tabName).size());
+//    
+//    AuthzPathsCache authzPathCache = new AuthzPathsCache(null, new String[]{"/"}, 0);
+//    SentryPolicyServiceClient sentryClient = new SentryServiceClientFactory().create(sentryConf);
+//    waitToCommit(authzPathCache, sentryClient);
+//    assertEquals("/%PREFIX[data%DIR[db_1.db%AUTHZ_OBJECT#db_1[tab1%AUTHZ_OBJECT#db_1.tab1[]]]]", authzPathCache.serializeAllPaths());
+//    client.dropTable(dbName, tabName);
+//    client.dropDatabase(dbName, true, true, true);
+//    waitToCommit(authzPathCache, sentryClient);
+//    assertEquals("/%PREFIX[]", authzPathCache.serializeAllPaths());
+//  }
+//
+//  private void waitToCommit(AuthzPathsCache authzPathCache, SentryPolicyServiceClient sentryClient)
+//      throws Exception {
+//    SentryAuthzUpdate allUpdates = sentryClient.getAllUpdatesFrom(0, 0);
+//    for (HMSUpdate update : allUpdates.pathUpdates) {
+//      authzPathCache.handleUpdateNotification(update);
+//    }
+//    int counter = 0;
+//    while(!authzPathCache.areAllUpdatesCommited()) {
+//      Thread.sleep(200);
+//      counter++;
+//      if (counter > 10000) {
+//        fail("Updates taking too long to commit !!");
+//      }
+//    }
+//  }
 
   /**
    * verify non-admin user can not create or drop DB

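Two notes on the disabled test above. First, the expected string from
AuthzPathsCache.serializeAllPaths() reads as an encoding of the cached path
tree: each node is rendered as pathElement%TYPE, an optional #authzObj
suffix ties the node to its Hive object, and brackets enclose its children
(so "/%PREFIX[data%DIR[db_1.db%AUTHZ_OBJECT#db_1[...]]]" is the warehouse
tree rooted at the configured prefix). Second, its waitToCommit helper is a
bounded polling loop; a generic, self-contained form of that pattern
(assumes Java 8 for BooleanSupplier; the 200 ms interval and the failure
message mirror the helper above):

    import java.util.function.BooleanSupplier;

    public final class WaitUtil {
      // Re-check a condition at a fixed interval; give up after timeoutMs.
      public static void waitFor(BooleanSupplier done, long timeoutMs)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!done.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("Updates taking too long to commit !!");
          }
          Thread.sleep(200);
        }
      }
    }

Usage against the cache would then be, e.g.,
waitFor(authzPathCache::areAllUpdatesCommited, 10000L).
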

[7/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java
new file mode 100644
index 0000000..480c264
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TAuthzUpdateResponse.java
@@ -0,0 +1,603 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TAuthzUpdateResponse implements org.apache.thrift.TBase<TAuthzUpdateResponse, TAuthzUpdateResponse._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAuthzUpdateResponse");
+
+  private static final org.apache.thrift.protocol.TField AUTHZ_PATH_UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("authzPathUpdate", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField AUTHZ_PERM_UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("authzPermUpdate", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TAuthzUpdateResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TAuthzUpdateResponseTupleSchemeFactory());
+  }
+
+  private List<TPathsUpdate> authzPathUpdate; // optional
+  private List<TPermissionsUpdate> authzPermUpdate; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    AUTHZ_PATH_UPDATE((short)1, "authzPathUpdate"),
+    AUTHZ_PERM_UPDATE((short)2, "authzPermUpdate");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // AUTHZ_PATH_UPDATE
+          return AUTHZ_PATH_UPDATE;
+        case 2: // AUTHZ_PERM_UPDATE
+          return AUTHZ_PERM_UPDATE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private _Fields optionals[] = {_Fields.AUTHZ_PATH_UPDATE,_Fields.AUTHZ_PERM_UPDATE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.AUTHZ_PATH_UPDATE, new org.apache.thrift.meta_data.FieldMetaData("authzPathUpdate", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPathsUpdate.class))));
+    tmpMap.put(_Fields.AUTHZ_PERM_UPDATE, new org.apache.thrift.meta_data.FieldMetaData("authzPermUpdate", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPermissionsUpdate.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAuthzUpdateResponse.class, metaDataMap);
+  }
+
+  public TAuthzUpdateResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TAuthzUpdateResponse(TAuthzUpdateResponse other) {
+    if (other.isSetAuthzPathUpdate()) {
+      List<TPathsUpdate> __this__authzPathUpdate = new ArrayList<TPathsUpdate>();
+      for (TPathsUpdate other_element : other.authzPathUpdate) {
+        __this__authzPathUpdate.add(new TPathsUpdate(other_element));
+      }
+      this.authzPathUpdate = __this__authzPathUpdate;
+    }
+    if (other.isSetAuthzPermUpdate()) {
+      List<TPermissionsUpdate> __this__authzPermUpdate = new ArrayList<TPermissionsUpdate>();
+      for (TPermissionsUpdate other_element : other.authzPermUpdate) {
+        __this__authzPermUpdate.add(new TPermissionsUpdate(other_element));
+      }
+      this.authzPermUpdate = __this__authzPermUpdate;
+    }
+  }
+
+  public TAuthzUpdateResponse deepCopy() {
+    return new TAuthzUpdateResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.authzPathUpdate = null;
+    this.authzPermUpdate = null;
+  }
+
+  public int getAuthzPathUpdateSize() {
+    return (this.authzPathUpdate == null) ? 0 : this.authzPathUpdate.size();
+  }
+
+  public java.util.Iterator<TPathsUpdate> getAuthzPathUpdateIterator() {
+    return (this.authzPathUpdate == null) ? null : this.authzPathUpdate.iterator();
+  }
+
+  public void addToAuthzPathUpdate(TPathsUpdate elem) {
+    if (this.authzPathUpdate == null) {
+      this.authzPathUpdate = new ArrayList<TPathsUpdate>();
+    }
+    this.authzPathUpdate.add(elem);
+  }
+
+  public List<TPathsUpdate> getAuthzPathUpdate() {
+    return this.authzPathUpdate;
+  }
+
+  public void setAuthzPathUpdate(List<TPathsUpdate> authzPathUpdate) {
+    this.authzPathUpdate = authzPathUpdate;
+  }
+
+  public void unsetAuthzPathUpdate() {
+    this.authzPathUpdate = null;
+  }
+
+  /** Returns true if field authzPathUpdate is set (has been assigned a value) and false otherwise */
+  public boolean isSetAuthzPathUpdate() {
+    return this.authzPathUpdate != null;
+  }
+
+  public void setAuthzPathUpdateIsSet(boolean value) {
+    if (!value) {
+      this.authzPathUpdate = null;
+    }
+  }
+
+  public int getAuthzPermUpdateSize() {
+    return (this.authzPermUpdate == null) ? 0 : this.authzPermUpdate.size();
+  }
+
+  public java.util.Iterator<TPermissionsUpdate> getAuthzPermUpdateIterator() {
+    return (this.authzPermUpdate == null) ? null : this.authzPermUpdate.iterator();
+  }
+
+  public void addToAuthzPermUpdate(TPermissionsUpdate elem) {
+    if (this.authzPermUpdate == null) {
+      this.authzPermUpdate = new ArrayList<TPermissionsUpdate>();
+    }
+    this.authzPermUpdate.add(elem);
+  }
+
+  public List<TPermissionsUpdate> getAuthzPermUpdate() {
+    return this.authzPermUpdate;
+  }
+
+  public void setAuthzPermUpdate(List<TPermissionsUpdate> authzPermUpdate) {
+    this.authzPermUpdate = authzPermUpdate;
+  }
+
+  public void unsetAuthzPermUpdate() {
+    this.authzPermUpdate = null;
+  }
+
+  /** Returns true if field authzPermUpdate is set (has been assigned a value) and false otherwise */
+  public boolean isSetAuthzPermUpdate() {
+    return this.authzPermUpdate != null;
+  }
+
+  public void setAuthzPermUpdateIsSet(boolean value) {
+    if (!value) {
+      this.authzPermUpdate = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case AUTHZ_PATH_UPDATE:
+      if (value == null) {
+        unsetAuthzPathUpdate();
+      } else {
+        setAuthzPathUpdate((List<TPathsUpdate>)value);
+      }
+      break;
+
+    case AUTHZ_PERM_UPDATE:
+      if (value == null) {
+        unsetAuthzPermUpdate();
+      } else {
+        setAuthzPermUpdate((List<TPermissionsUpdate>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case AUTHZ_PATH_UPDATE:
+      return getAuthzPathUpdate();
+
+    case AUTHZ_PERM_UPDATE:
+      return getAuthzPermUpdate();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case AUTHZ_PATH_UPDATE:
+      return isSetAuthzPathUpdate();
+    case AUTHZ_PERM_UPDATE:
+      return isSetAuthzPermUpdate();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TAuthzUpdateResponse)
+      return this.equals((TAuthzUpdateResponse)that);
+    return false;
+  }
+
+  public boolean equals(TAuthzUpdateResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_authzPathUpdate = true && this.isSetAuthzPathUpdate();
+    boolean that_present_authzPathUpdate = true && that.isSetAuthzPathUpdate();
+    if (this_present_authzPathUpdate || that_present_authzPathUpdate) {
+      if (!(this_present_authzPathUpdate && that_present_authzPathUpdate))
+        return false;
+      if (!this.authzPathUpdate.equals(that.authzPathUpdate))
+        return false;
+    }
+
+    boolean this_present_authzPermUpdate = true && this.isSetAuthzPermUpdate();
+    boolean that_present_authzPermUpdate = true && that.isSetAuthzPermUpdate();
+    if (this_present_authzPermUpdate || that_present_authzPermUpdate) {
+      if (!(this_present_authzPermUpdate && that_present_authzPermUpdate))
+        return false;
+      if (!this.authzPermUpdate.equals(that.authzPermUpdate))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_authzPathUpdate = true && (isSetAuthzPathUpdate());
+    builder.append(present_authzPathUpdate);
+    if (present_authzPathUpdate)
+      builder.append(authzPathUpdate);
+
+    boolean present_authzPermUpdate = true && (isSetAuthzPermUpdate());
+    builder.append(present_authzPermUpdate);
+    if (present_authzPermUpdate)
+      builder.append(authzPermUpdate);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TAuthzUpdateResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TAuthzUpdateResponse typedOther = (TAuthzUpdateResponse)other;
+
+    lastComparison = Boolean.valueOf(isSetAuthzPathUpdate()).compareTo(typedOther.isSetAuthzPathUpdate());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAuthzPathUpdate()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzPathUpdate, typedOther.authzPathUpdate);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAuthzPermUpdate()).compareTo(typedOther.isSetAuthzPermUpdate());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAuthzPermUpdate()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzPermUpdate, typedOther.authzPermUpdate);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TAuthzUpdateResponse(");
+    boolean first = true;
+
+    if (isSetAuthzPathUpdate()) {
+      sb.append("authzPathUpdate:");
+      if (this.authzPathUpdate == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.authzPathUpdate);
+      }
+      first = false;
+    }
+    if (isSetAuthzPermUpdate()) {
+      if (!first) sb.append(", ");
+      sb.append("authzPermUpdate:");
+      if (this.authzPermUpdate == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.authzPermUpdate);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TAuthzUpdateResponseStandardSchemeFactory implements SchemeFactory {
+    public TAuthzUpdateResponseStandardScheme getScheme() {
+      return new TAuthzUpdateResponseStandardScheme();
+    }
+  }
+
+  private static class TAuthzUpdateResponseStandardScheme extends StandardScheme<TAuthzUpdateResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TAuthzUpdateResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // AUTHZ_PATH_UPDATE
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list114 = iprot.readListBegin();
+                struct.authzPathUpdate = new ArrayList<TPathsUpdate>(_list114.size);
+                for (int _i115 = 0; _i115 < _list114.size; ++_i115)
+                {
+                  TPathsUpdate _elem116; // required
+                  _elem116 = new TPathsUpdate();
+                  _elem116.read(iprot);
+                  struct.authzPathUpdate.add(_elem116);
+                }
+                iprot.readListEnd();
+              }
+              struct.setAuthzPathUpdateIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // AUTHZ_PERM_UPDATE
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list117 = iprot.readListBegin();
+                struct.authzPermUpdate = new ArrayList<TPermissionsUpdate>(_list117.size);
+                for (int _i118 = 0; _i118 < _list117.size; ++_i118)
+                {
+                  TPermissionsUpdate _elem119; // required
+                  _elem119 = new TPermissionsUpdate();
+                  _elem119.read(iprot);
+                  struct.authzPermUpdate.add(_elem119);
+                }
+                iprot.readListEnd();
+              }
+              struct.setAuthzPermUpdateIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthzUpdateResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.authzPathUpdate != null) {
+        if (struct.isSetAuthzPathUpdate()) {
+          oprot.writeFieldBegin(AUTHZ_PATH_UPDATE_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.authzPathUpdate.size()));
+            for (TPathsUpdate _iter120 : struct.authzPathUpdate)
+            {
+              _iter120.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.authzPermUpdate != null) {
+        if (struct.isSetAuthzPermUpdate()) {
+          oprot.writeFieldBegin(AUTHZ_PERM_UPDATE_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.authzPermUpdate.size()));
+            for (TPermissionsUpdate _iter121 : struct.authzPermUpdate)
+            {
+              _iter121.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TAuthzUpdateResponseTupleSchemeFactory implements SchemeFactory {
+    public TAuthzUpdateResponseTupleScheme getScheme() {
+      return new TAuthzUpdateResponseTupleScheme();
+    }
+  }
+
+  private static class TAuthzUpdateResponseTupleScheme extends TupleScheme<TAuthzUpdateResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TAuthzUpdateResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetAuthzPathUpdate()) {
+        optionals.set(0);
+      }
+      if (struct.isSetAuthzPermUpdate()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetAuthzPathUpdate()) {
+        {
+          oprot.writeI32(struct.authzPathUpdate.size());
+          for (TPathsUpdate _iter122 : struct.authzPathUpdate)
+          {
+            _iter122.write(oprot);
+          }
+        }
+      }
+      if (struct.isSetAuthzPermUpdate()) {
+        {
+          oprot.writeI32(struct.authzPermUpdate.size());
+          for (TPermissionsUpdate _iter123 : struct.authzPermUpdate)
+          {
+            _iter123.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TAuthzUpdateResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list124 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.authzPathUpdate = new ArrayList<TPathsUpdate>(_list124.size);
+          for (int _i125 = 0; _i125 < _list124.size; ++_i125)
+          {
+            TPathsUpdate _elem126; // required
+            _elem126 = new TPathsUpdate();
+            _elem126.read(iprot);
+            struct.authzPathUpdate.add(_elem126);
+          }
+        }
+        struct.setAuthzPathUpdateIsSet(true);
+      }
+      if (incoming.get(1)) {
+        {
+          org.apache.thrift.protocol.TList _list127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.authzPermUpdate = new ArrayList<TPermissionsUpdate>(_list127.size);
+          for (int _i128 = 0; _i128 < _list127.size; ++_i128)
+          {
+            TPermissionsUpdate _elem129; // required
+            _elem129 = new TPermissionsUpdate();
+            _elem129.read(iprot);
+            struct.authzPermUpdate.add(_elem129);
+          }
+        }
+        struct.setAuthzPermUpdateIsSet(true);
+      }
+    }
+  }
+
+}
+

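Both fields of this generated struct are optional, so an empty
TAuthzUpdateResponse is valid and round-trips cleanly. A minimal sketch
using libthrift's TSerializer/TDeserializer with the same compact protocol
the struct's writeObject/readObject use (a populated response would also
need each TPathsUpdate/TPermissionsUpdate element's own required fields
set):

    import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class TAuthzUpdateResponseRoundTrip {
      public static void main(String[] args) throws TException {
        // Empty response: both optional list fields are left unset.
        TAuthzUpdateResponse resp = new TAuthzUpdateResponse();
        byte[] wire =
            new TSerializer(new TCompactProtocol.Factory()).serialize(resp);
        TAuthzUpdateResponse copy = new TAuthzUpdateResponse();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        // equals() compares the isSet flags and contents of both fields.
        System.out.println(resp.equals(copy)); // true
      }
    }
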
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathChanges.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathChanges.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathChanges.java
new file mode 100644
index 0000000..85254d7
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathChanges.java
@@ -0,0 +1,765 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TPathChanges implements org.apache.thrift.TBase<TPathChanges, TPathChanges._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPathChanges");
+
+  private static final org.apache.thrift.protocol.TField AUTHZ_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("authzObj", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ADD_PATHS_FIELD_DESC = new org.apache.thrift.protocol.TField("addPaths", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField DEL_PATHS_FIELD_DESC = new org.apache.thrift.protocol.TField("delPaths", org.apache.thrift.protocol.TType.LIST, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TPathChangesStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TPathChangesTupleSchemeFactory());
+  }
+
+  private String authzObj; // required
+  private List<List<String>> addPaths; // required
+  private List<List<String>> delPaths; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    AUTHZ_OBJ((short)1, "authzObj"),
+    ADD_PATHS((short)2, "addPaths"),
+    DEL_PATHS((short)3, "delPaths");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // AUTHZ_OBJ
+          return AUTHZ_OBJ;
+        case 2: // ADD_PATHS
+          return ADD_PATHS;
+        case 3: // DEL_PATHS
+          return DEL_PATHS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.AUTHZ_OBJ, new org.apache.thrift.meta_data.FieldMetaData("authzObj", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ADD_PATHS, new org.apache.thrift.meta_data.FieldMetaData("addPaths", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))));
+    tmpMap.put(_Fields.DEL_PATHS, new org.apache.thrift.meta_data.FieldMetaData("delPaths", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPathChanges.class, metaDataMap);
+  }
+
+  public TPathChanges() {
+  }
+
+  public TPathChanges(
+    String authzObj,
+    List<List<String>> addPaths,
+    List<List<String>> delPaths)
+  {
+    this();
+    this.authzObj = authzObj;
+    this.addPaths = addPaths;
+    this.delPaths = delPaths;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TPathChanges(TPathChanges other) {
+    if (other.isSetAuthzObj()) {
+      this.authzObj = other.authzObj;
+    }
+    if (other.isSetAddPaths()) {
+      List<List<String>> __this__addPaths = new ArrayList<List<String>>();
+      for (List<String> other_element : other.addPaths) {
+        List<String> __this__addPaths_copy = new ArrayList<String>();
+        for (String other_element_element : other_element) {
+          __this__addPaths_copy.add(other_element_element);
+        }
+        __this__addPaths.add(__this__addPaths_copy);
+      }
+      this.addPaths = __this__addPaths;
+    }
+    if (other.isSetDelPaths()) {
+      List<List<String>> __this__delPaths = new ArrayList<List<String>>();
+      for (List<String> other_element : other.delPaths) {
+        List<String> __this__delPaths_copy = new ArrayList<String>();
+        for (String other_element_element : other_element) {
+          __this__delPaths_copy.add(other_element_element);
+        }
+        __this__delPaths.add(__this__delPaths_copy);
+      }
+      this.delPaths = __this__delPaths;
+    }
+  }
+
+  public TPathChanges deepCopy() {
+    return new TPathChanges(this);
+  }
+
+  @Override
+  public void clear() {
+    this.authzObj = null;
+    this.addPaths = null;
+    this.delPaths = null;
+  }
+
+  public String getAuthzObj() {
+    return this.authzObj;
+  }
+
+  public void setAuthzObj(String authzObj) {
+    this.authzObj = authzObj;
+  }
+
+  public void unsetAuthzObj() {
+    this.authzObj = null;
+  }
+
+  /** Returns true if field authzObj is set (has been assigned a value) and false otherwise */
+  public boolean isSetAuthzObj() {
+    return this.authzObj != null;
+  }
+
+  public void setAuthzObjIsSet(boolean value) {
+    if (!value) {
+      this.authzObj = null;
+    }
+  }
+
+  public int getAddPathsSize() {
+    return (this.addPaths == null) ? 0 : this.addPaths.size();
+  }
+
+  public java.util.Iterator<List<String>> getAddPathsIterator() {
+    return (this.addPaths == null) ? null : this.addPaths.iterator();
+  }
+
+  public void addToAddPaths(List<String> elem) {
+    if (this.addPaths == null) {
+      this.addPaths = new ArrayList<List<String>>();
+    }
+    this.addPaths.add(elem);
+  }
+
+  public List<List<String>> getAddPaths() {
+    return this.addPaths;
+  }
+
+  public void setAddPaths(List<List<String>> addPaths) {
+    this.addPaths = addPaths;
+  }
+
+  public void unsetAddPaths() {
+    this.addPaths = null;
+  }
+
+  /** Returns true if field addPaths is set (has been assigned a value) and false otherwise */
+  public boolean isSetAddPaths() {
+    return this.addPaths != null;
+  }
+
+  public void setAddPathsIsSet(boolean value) {
+    if (!value) {
+      this.addPaths = null;
+    }
+  }
+
+  public int getDelPathsSize() {
+    return (this.delPaths == null) ? 0 : this.delPaths.size();
+  }
+
+  public java.util.Iterator<List<String>> getDelPathsIterator() {
+    return (this.delPaths == null) ? null : this.delPaths.iterator();
+  }
+
+  public void addToDelPaths(List<String> elem) {
+    if (this.delPaths == null) {
+      this.delPaths = new ArrayList<List<String>>();
+    }
+    this.delPaths.add(elem);
+  }
+
+  public List<List<String>> getDelPaths() {
+    return this.delPaths;
+  }
+
+  public void setDelPaths(List<List<String>> delPaths) {
+    this.delPaths = delPaths;
+  }
+
+  public void unsetDelPaths() {
+    this.delPaths = null;
+  }
+
+  /** Returns true if field delPaths is set (has been assigned a value) and false otherwise */
+  public boolean isSetDelPaths() {
+    return this.delPaths != null;
+  }
+
+  public void setDelPathsIsSet(boolean value) {
+    if (!value) {
+      this.delPaths = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case AUTHZ_OBJ:
+      if (value == null) {
+        unsetAuthzObj();
+      } else {
+        setAuthzObj((String)value);
+      }
+      break;
+
+    case ADD_PATHS:
+      if (value == null) {
+        unsetAddPaths();
+      } else {
+        setAddPaths((List<List<String>>)value);
+      }
+      break;
+
+    case DEL_PATHS:
+      if (value == null) {
+        unsetDelPaths();
+      } else {
+        setDelPaths((List<List<String>>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case AUTHZ_OBJ:
+      return getAuthzObj();
+
+    case ADD_PATHS:
+      return getAddPaths();
+
+    case DEL_PATHS:
+      return getDelPaths();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case AUTHZ_OBJ:
+      return isSetAuthzObj();
+    case ADD_PATHS:
+      return isSetAddPaths();
+    case DEL_PATHS:
+      return isSetDelPaths();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TPathChanges)
+      return this.equals((TPathChanges)that);
+    return false;
+  }
+
+  public boolean equals(TPathChanges that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_authzObj = true && this.isSetAuthzObj();
+    boolean that_present_authzObj = true && that.isSetAuthzObj();
+    if (this_present_authzObj || that_present_authzObj) {
+      if (!(this_present_authzObj && that_present_authzObj))
+        return false;
+      if (!this.authzObj.equals(that.authzObj))
+        return false;
+    }
+
+    boolean this_present_addPaths = true && this.isSetAddPaths();
+    boolean that_present_addPaths = true && that.isSetAddPaths();
+    if (this_present_addPaths || that_present_addPaths) {
+      if (!(this_present_addPaths && that_present_addPaths))
+        return false;
+      if (!this.addPaths.equals(that.addPaths))
+        return false;
+    }
+
+    boolean this_present_delPaths = true && this.isSetDelPaths();
+    boolean that_present_delPaths = true && that.isSetDelPaths();
+    if (this_present_delPaths || that_present_delPaths) {
+      if (!(this_present_delPaths && that_present_delPaths))
+        return false;
+      if (!this.delPaths.equals(that.delPaths))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_authzObj = true && (isSetAuthzObj());
+    builder.append(present_authzObj);
+    if (present_authzObj)
+      builder.append(authzObj);
+
+    boolean present_addPaths = true && (isSetAddPaths());
+    builder.append(present_addPaths);
+    if (present_addPaths)
+      builder.append(addPaths);
+
+    boolean present_delPaths = true && (isSetDelPaths());
+    builder.append(present_delPaths);
+    if (present_delPaths)
+      builder.append(delPaths);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TPathChanges other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TPathChanges typedOther = (TPathChanges)other;
+
+    lastComparison = Boolean.valueOf(isSetAuthzObj()).compareTo(typedOther.isSetAuthzObj());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAuthzObj()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzObj, typedOther.authzObj);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAddPaths()).compareTo(typedOther.isSetAddPaths());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAddPaths()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.addPaths, typedOther.addPaths);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDelPaths()).compareTo(typedOther.isSetDelPaths());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDelPaths()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delPaths, typedOther.delPaths);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TPathChanges(");
+    boolean first = true;
+
+    sb.append("authzObj:");
+    if (this.authzObj == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.authzObj);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("addPaths:");
+    if (this.addPaths == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.addPaths);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("delPaths:");
+    if (this.delPaths == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.delPaths);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetAuthzObj()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'authzObj' is unset! Struct:" + toString());
+    }
+
+    if (!isSetAddPaths()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'addPaths' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDelPaths()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'delPaths' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TPathChangesStandardSchemeFactory implements SchemeFactory {
+    public TPathChangesStandardScheme getScheme() {
+      return new TPathChangesStandardScheme();
+    }
+  }
+
+  private static class TPathChangesStandardScheme extends StandardScheme<TPathChanges> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TPathChanges struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // AUTHZ_OBJ
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.authzObj = iprot.readString();
+              struct.setAuthzObjIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ADD_PATHS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
+                struct.addPaths = new ArrayList<List<String>>(_list0.size);
+                for (int _i1 = 0; _i1 < _list0.size; ++_i1)
+                {
+                  List<String> _elem2; // required
+                  {
+                    org.apache.thrift.protocol.TList _list3 = iprot.readListBegin();
+                    _elem2 = new ArrayList<String>(_list3.size);
+                    for (int _i4 = 0; _i4 < _list3.size; ++_i4)
+                    {
+                      String _elem5; // required
+                      _elem5 = iprot.readString();
+                      _elem2.add(_elem5);
+                    }
+                    iprot.readListEnd();
+                  }
+                  struct.addPaths.add(_elem2);
+                }
+                iprot.readListEnd();
+              }
+              struct.setAddPathsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DEL_PATHS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list6 = iprot.readListBegin();
+                struct.delPaths = new ArrayList<List<String>>(_list6.size);
+                for (int _i7 = 0; _i7 < _list6.size; ++_i7)
+                {
+                  List<String> _elem8; // required
+                  {
+                    org.apache.thrift.protocol.TList _list9 = iprot.readListBegin();
+                    _elem8 = new ArrayList<String>(_list9.size);
+                    for (int _i10 = 0; _i10 < _list9.size; ++_i10)
+                    {
+                      String _elem11; // required
+                      _elem11 = iprot.readString();
+                      _elem8.add(_elem11);
+                    }
+                    iprot.readListEnd();
+                  }
+                  struct.delPaths.add(_elem8);
+                }
+                iprot.readListEnd();
+              }
+              struct.setDelPathsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TPathChanges struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.authzObj != null) {
+        oprot.writeFieldBegin(AUTHZ_OBJ_FIELD_DESC);
+        oprot.writeString(struct.authzObj);
+        oprot.writeFieldEnd();
+      }
+      if (struct.addPaths != null) {
+        oprot.writeFieldBegin(ADD_PATHS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, struct.addPaths.size()));
+          for (List<String> _iter12 : struct.addPaths)
+          {
+            {
+              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter12.size()));
+              for (String _iter13 : _iter12)
+              {
+                oprot.writeString(_iter13);
+              }
+              oprot.writeListEnd();
+            }
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.delPaths != null) {
+        oprot.writeFieldBegin(DEL_PATHS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, struct.delPaths.size()));
+          for (List<String> _iter14 : struct.delPaths)
+          {
+            {
+              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter14.size()));
+              for (String _iter15 : _iter14)
+              {
+                oprot.writeString(_iter15);
+              }
+              oprot.writeListEnd();
+            }
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TPathChangesTupleSchemeFactory implements SchemeFactory {
+    public TPathChangesTupleScheme getScheme() {
+      return new TPathChangesTupleScheme();
+    }
+  }
+
+  private static class TPathChangesTupleScheme extends TupleScheme<TPathChanges> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TPathChanges struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.authzObj);
+      {
+        oprot.writeI32(struct.addPaths.size());
+        for (List<String> _iter16 : struct.addPaths)
+        {
+          {
+            oprot.writeI32(_iter16.size());
+            for (String _iter17 : _iter16)
+            {
+              oprot.writeString(_iter17);
+            }
+          }
+        }
+      }
+      {
+        oprot.writeI32(struct.delPaths.size());
+        for (List<String> _iter18 : struct.delPaths)
+        {
+          {
+            oprot.writeI32(_iter18.size());
+            for (String _iter19 : _iter18)
+            {
+              oprot.writeString(_iter19);
+            }
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TPathChanges struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.authzObj = iprot.readString();
+      struct.setAuthzObjIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list20 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+        struct.addPaths = new ArrayList<List<String>>(_list20.size);
+        for (int _i21 = 0; _i21 < _list20.size; ++_i21)
+        {
+          List<String> _elem22; // required
+          {
+            org.apache.thrift.protocol.TList _list23 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            _elem22 = new ArrayList<String>(_list23.size);
+            for (int _i24 = 0; _i24 < _list23.size; ++_i24)
+            {
+              String _elem25; // required
+              _elem25 = iprot.readString();
+              _elem22.add(_elem25);
+            }
+          }
+          struct.addPaths.add(_elem22);
+        }
+      }
+      struct.setAddPathsIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list26 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+        struct.delPaths = new ArrayList<List<String>>(_list26.size);
+        for (int _i27 = 0; _i27 < _list26.size; ++_i27)
+        {
+          List<String> _elem28; // required
+          {
+            org.apache.thrift.protocol.TList _list29 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            _elem28 = new ArrayList<String>(_list29.size);
+            for (int _i30 = 0; _i30 < _list29.size; ++_i30)
+            {
+              String _elem31; // required
+              _elem31 = iprot.readString();
+              _elem28.add(_elem31);
+            }
+          }
+          struct.delPaths.add(_elem28);
+        }
+      }
+      struct.setDelPathsIsSet(true);
+    }
+  }
+
+}
+

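TPathChanges carries one authorizable object plus lists of added and
deleted paths, each path itself a list of elements; all three fields are
required (see validate() above). A construction sketch using the generated
three-argument constructor (splitting the path into warehouse-relative
components is an assumption for illustration):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.sentry.hdfs.service.thrift.TPathChanges;
    import org.apache.thrift.TException;

    public class TPathChangesSketch {
      public static void main(String[] args) throws TException {
        // One added path, split into its elements; no deletions.
        List<List<String>> addPaths = new ArrayList<List<String>>();
        addPaths.add(Arrays.asList("data", "db_1.db", "tab1")); // assumed split
        List<List<String>> delPaths = new ArrayList<List<String>>();
        TPathChanges change = new TPathChanges("db_1.tab1", addPaths, delPaths);
        change.validate(); // passes: authzObj, addPaths and delPaths all set
        System.out.println(change);
      }
    }
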
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java
new file mode 100644
index 0000000..a2a7f7b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathEntry.java
@@ -0,0 +1,747 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TPathEntry implements org.apache.thrift.TBase<TPathEntry, TPathEntry._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPathEntry");
+
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.BYTE, (short)1);
+  private static final org.apache.thrift.protocol.TField PATH_ELEMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("pathElement", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField AUTHZ_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("authzObj", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField CHILDREN_FIELD_DESC = new org.apache.thrift.protocol.TField("children", org.apache.thrift.protocol.TType.SET, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TPathEntryStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TPathEntryTupleSchemeFactory());
+  }
+
+  private byte type; // required
+  private String pathElement; // required
+  private String authzObj; // optional
+  private Set<Integer> children; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TYPE((short)1, "type"),
+    PATH_ELEMENT((short)2, "pathElement"),
+    AUTHZ_OBJ((short)3, "authzObj"),
+    CHILDREN((short)4, "children");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TYPE
+          return TYPE;
+        case 2: // PATH_ELEMENT
+          return PATH_ELEMENT;
+        case 3: // AUTHZ_OBJ
+          return AUTHZ_OBJ;
+        case 4: // CHILDREN
+          return CHILDREN;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TYPE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private _Fields optionals[] = {_Fields.AUTHZ_OBJ};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)));
+    tmpMap.put(_Fields.PATH_ELEMENT, new org.apache.thrift.meta_data.FieldMetaData("pathElement", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.AUTHZ_OBJ, new org.apache.thrift.meta_data.FieldMetaData("authzObj", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CHILDREN, new org.apache.thrift.meta_data.FieldMetaData("children", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPathEntry.class, metaDataMap);
+  }
+
+  public TPathEntry() {
+  }
+
+  public TPathEntry(
+    byte type,
+    String pathElement,
+    Set<Integer> children)
+  {
+    this();
+    this.type = type;
+    setTypeIsSet(true);
+    this.pathElement = pathElement;
+    this.children = children;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TPathEntry(TPathEntry other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.type = other.type;
+    if (other.isSetPathElement()) {
+      this.pathElement = other.pathElement;
+    }
+    if (other.isSetAuthzObj()) {
+      this.authzObj = other.authzObj;
+    }
+    if (other.isSetChildren()) {
+      Set<Integer> __this__children = new HashSet<Integer>();
+      for (Integer other_element : other.children) {
+        __this__children.add(other_element);
+      }
+      this.children = __this__children;
+    }
+  }
+
+  public TPathEntry deepCopy() {
+    return new TPathEntry(this);
+  }
+
+  @Override
+  public void clear() {
+    setTypeIsSet(false);
+    this.type = 0;
+    this.pathElement = null;
+    this.authzObj = null;
+    this.children = null;
+  }
+
+  public byte getType() {
+    return this.type;
+  }
+
+  public void setType(byte type) {
+    this.type = type;
+    setTypeIsSet(true);
+  }
+
+  public void unsetType() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TYPE_ISSET_ID);
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return EncodingUtils.testBit(__isset_bitfield, __TYPE_ISSET_ID);
+  }
+
+  public void setTypeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TYPE_ISSET_ID, value);
+  }
+
+  public String getPathElement() {
+    return this.pathElement;
+  }
+
+  public void setPathElement(String pathElement) {
+    this.pathElement = pathElement;
+  }
+
+  public void unsetPathElement() {
+    this.pathElement = null;
+  }
+
+  /** Returns true if field pathElement is set (has been assigned a value) and false otherwise */
+  public boolean isSetPathElement() {
+    return this.pathElement != null;
+  }
+
+  public void setPathElementIsSet(boolean value) {
+    if (!value) {
+      this.pathElement = null;
+    }
+  }
+
+  public String getAuthzObj() {
+    return this.authzObj;
+  }
+
+  public void setAuthzObj(String authzObj) {
+    this.authzObj = authzObj;
+  }
+
+  public void unsetAuthzObj() {
+    this.authzObj = null;
+  }
+
+  /** Returns true if field authzObj is set (has been assigned a value) and false otherwise */
+  public boolean isSetAuthzObj() {
+    return this.authzObj != null;
+  }
+
+  public void setAuthzObjIsSet(boolean value) {
+    if (!value) {
+      this.authzObj = null;
+    }
+  }
+
+  public int getChildrenSize() {
+    return (this.children == null) ? 0 : this.children.size();
+  }
+
+  public java.util.Iterator<Integer> getChildrenIterator() {
+    return (this.children == null) ? null : this.children.iterator();
+  }
+
+  public void addToChildren(int elem) {
+    if (this.children == null) {
+      this.children = new HashSet<Integer>();
+    }
+    this.children.add(elem);
+  }
+
+  public Set<Integer> getChildren() {
+    return this.children;
+  }
+
+  public void setChildren(Set<Integer> children) {
+    this.children = children;
+  }
+
+  public void unsetChildren() {
+    this.children = null;
+  }
+
+  /** Returns true if field children is set (has been assigned a value) and false otherwise */
+  public boolean isSetChildren() {
+    return this.children != null;
+  }
+
+  public void setChildrenIsSet(boolean value) {
+    if (!value) {
+      this.children = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((Byte)value);
+      }
+      break;
+
+    case PATH_ELEMENT:
+      if (value == null) {
+        unsetPathElement();
+      } else {
+        setPathElement((String)value);
+      }
+      break;
+
+    case AUTHZ_OBJ:
+      if (value == null) {
+        unsetAuthzObj();
+      } else {
+        setAuthzObj((String)value);
+      }
+      break;
+
+    case CHILDREN:
+      if (value == null) {
+        unsetChildren();
+      } else {
+        setChildren((Set<Integer>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TYPE:
+      return Byte.valueOf(getType());
+
+    case PATH_ELEMENT:
+      return getPathElement();
+
+    case AUTHZ_OBJ:
+      return getAuthzObj();
+
+    case CHILDREN:
+      return getChildren();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TYPE:
+      return isSetType();
+    case PATH_ELEMENT:
+      return isSetPathElement();
+    case AUTHZ_OBJ:
+      return isSetAuthzObj();
+    case CHILDREN:
+      return isSetChildren();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TPathEntry)
+      return this.equals((TPathEntry)that);
+    return false;
+  }
+
+  public boolean equals(TPathEntry that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_type = true;
+    boolean that_present_type = true;
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (this.type != that.type)
+        return false;
+    }
+
+    boolean this_present_pathElement = true && this.isSetPathElement();
+    boolean that_present_pathElement = true && that.isSetPathElement();
+    if (this_present_pathElement || that_present_pathElement) {
+      if (!(this_present_pathElement && that_present_pathElement))
+        return false;
+      if (!this.pathElement.equals(that.pathElement))
+        return false;
+    }
+
+    boolean this_present_authzObj = true && this.isSetAuthzObj();
+    boolean that_present_authzObj = true && that.isSetAuthzObj();
+    if (this_present_authzObj || that_present_authzObj) {
+      if (!(this_present_authzObj && that_present_authzObj))
+        return false;
+      if (!this.authzObj.equals(that.authzObj))
+        return false;
+    }
+
+    boolean this_present_children = true && this.isSetChildren();
+    boolean that_present_children = true && that.isSetChildren();
+    if (this_present_children || that_present_children) {
+      if (!(this_present_children && that_present_children))
+        return false;
+      if (!this.children.equals(that.children))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_type = true;
+    builder.append(present_type);
+    if (present_type)
+      builder.append(type);
+
+    boolean present_pathElement = true && (isSetPathElement());
+    builder.append(present_pathElement);
+    if (present_pathElement)
+      builder.append(pathElement);
+
+    boolean present_authzObj = true && (isSetAuthzObj());
+    builder.append(present_authzObj);
+    if (present_authzObj)
+      builder.append(authzObj);
+
+    boolean present_children = true && (isSetChildren());
+    builder.append(present_children);
+    if (present_children)
+      builder.append(children);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TPathEntry other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TPathEntry typedOther = (TPathEntry)other;
+
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(typedOther.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, typedOther.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPathElement()).compareTo(typedOther.isSetPathElement());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPathElement()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pathElement, typedOther.pathElement);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAuthzObj()).compareTo(typedOther.isSetAuthzObj());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAuthzObj()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzObj, typedOther.authzObj);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetChildren()).compareTo(typedOther.isSetChildren());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetChildren()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.children, typedOther.children);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TPathEntry(");
+    boolean first = true;
+
+    sb.append("type:");
+    sb.append(this.type);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("pathElement:");
+    if (this.pathElement == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.pathElement);
+    }
+    first = false;
+    if (isSetAuthzObj()) {
+      if (!first) sb.append(", ");
+      sb.append("authzObj:");
+      if (this.authzObj == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.authzObj);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("children:");
+    if (this.children == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.children);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetType()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPathElement()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'pathElement' is unset! Struct:" + toString());
+    }
+
+    if (!isSetChildren()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'children' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TPathEntryStandardSchemeFactory implements SchemeFactory {
+    public TPathEntryStandardScheme getScheme() {
+      return new TPathEntryStandardScheme();
+    }
+  }
+
+  private static class TPathEntryStandardScheme extends StandardScheme<TPathEntry> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TPathEntry struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
+              struct.type = iprot.readByte();
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PATH_ELEMENT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.pathElement = iprot.readString();
+              struct.setPathElementIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // AUTHZ_OBJ
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.authzObj = iprot.readString();
+              struct.setAuthzObjIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // CHILDREN
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
+              {
+                org.apache.thrift.protocol.TSet _set32 = iprot.readSetBegin();
+                struct.children = new HashSet<Integer>(2*_set32.size);
+                for (int _i33 = 0; _i33 < _set32.size; ++_i33)
+                {
+                  int _elem34; // required
+                  _elem34 = iprot.readI32();
+                  struct.children.add(_elem34);
+                }
+                iprot.readSetEnd();
+              }
+              struct.setChildrenIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TPathEntry struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TYPE_FIELD_DESC);
+      oprot.writeByte(struct.type);
+      oprot.writeFieldEnd();
+      if (struct.pathElement != null) {
+        oprot.writeFieldBegin(PATH_ELEMENT_FIELD_DESC);
+        oprot.writeString(struct.pathElement);
+        oprot.writeFieldEnd();
+      }
+      if (struct.authzObj != null) {
+        if (struct.isSetAuthzObj()) {
+          oprot.writeFieldBegin(AUTHZ_OBJ_FIELD_DESC);
+          oprot.writeString(struct.authzObj);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.children != null) {
+        oprot.writeFieldBegin(CHILDREN_FIELD_DESC);
+        {
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I32, struct.children.size()));
+          for (int _iter35 : struct.children)
+          {
+            oprot.writeI32(_iter35);
+          }
+          oprot.writeSetEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TPathEntryTupleSchemeFactory implements SchemeFactory {
+    public TPathEntryTupleScheme getScheme() {
+      return new TPathEntryTupleScheme();
+    }
+  }
+
+  private static class TPathEntryTupleScheme extends TupleScheme<TPathEntry> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TPathEntry struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeByte(struct.type);
+      oprot.writeString(struct.pathElement);
+      {
+        oprot.writeI32(struct.children.size());
+        for (int _iter36 : struct.children)
+        {
+          oprot.writeI32(_iter36);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetAuthzObj()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetAuthzObj()) {
+        oprot.writeString(struct.authzObj);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TPathEntry struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.type = iprot.readByte();
+      struct.setTypeIsSet(true);
+      struct.pathElement = iprot.readString();
+      struct.setPathElementIsSet(true);
+      {
+        org.apache.thrift.protocol.TSet _set37 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I32, iprot.readI32());
+        struct.children = new HashSet<Integer>(2*_set37.size);
+        for (int _i38 = 0; _i38 < _set37.size; ++_i38)
+        {
+          int _elem39; // required
+          _elem39 = iprot.readI32();
+          struct.children.add(_elem39);
+        }
+      }
+      struct.setChildrenIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.authzObj = iprot.readString();
+        struct.setAuthzObjIsSet(true);
+      }
+    }
+  }
+
+}
+
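For orientation: TPathEntry above is the node type of the serialized HDFS path tree, carrying a byte node type, a single path element, an optional authzObj binding, and a set of integer child ids. A minimal sketch of wiring such nodes together follows; the type value 0 and the convention that child ids index into a flat node list are illustrative assumptions, not taken from this patch.

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import org.apache.sentry.hdfs.service.thrift.TPathEntry;

    // Sketch: "/user/hive" as three TPathEntry nodes; children hold list indexes.
    List<TPathEntry> nodes = new ArrayList<TPathEntry>();
    nodes.add(new TPathEntry((byte) 0, "/", new HashSet<Integer>()));    // index 0
    nodes.add(new TPathEntry((byte) 0, "user", new HashSet<Integer>())); // index 1
    nodes.add(new TPathEntry((byte) 0, "hive", new HashSet<Integer>())); // index 2
    nodes.get(0).addToChildren(1);   // "/" -> "user"
    nodes.get(1).addToChildren(2);   // "user" -> "hive"
    nodes.get(2).setAuthzObj("db1"); // a leaf may map to a Hive authorizable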


[3/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java
new file mode 100644
index 0000000..4b27e7b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryPermissions.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+
+import com.google.common.collect.Lists;
+
+public class SentryPermissions implements AuthzPermissions {
+  
+  public static class PrivilegeInfo {
+    private final String authzObj;
+    private final Map<String, FsAction> roleToPermission = new HashMap<String, FsAction>();
+    public PrivilegeInfo(String authzObj) {
+      this.authzObj = authzObj;
+    }
+    public PrivilegeInfo setPermission(String role, FsAction perm) {
+      roleToPermission.put(role, perm);
+      return this;
+    }
+    public PrivilegeInfo removePermission(String role) {
+      roleToPermission.remove(role);
+      return this;
+    }
+    public FsAction getPermission(String role) {
+      return roleToPermission.get(role);
+    }
+    public Map<String, FsAction> getAllPermissions() {
+      return roleToPermission;
+    }
+    public String getAuthzObj() {
+      return authzObj;
+    }
+  }
+
+  public static class RoleInfo {
+    private final String role;
+    private final Set<String> groups = new HashSet<String>();
+    public RoleInfo(String role) {
+      this.role = role;
+    }
+    public RoleInfo addGroup(String group) {
+      groups.add(group);
+      return this;
+    }
+    public RoleInfo delGroup(String group) {
+      groups.remove(group);
+      return this;
+    }
+    public String getRole() {
+      return role;
+    }
+    public Set<String> getAllGroups() {
+      return groups;
+    }
+  }
+
+  private final Map<String, PrivilegeInfo> privileges = new HashMap<String, PrivilegeInfo>();
+  private final Map<String, RoleInfo> roles = new HashMap<String, RoleInfo>();
+  private Map<String, Set<String>> authzObjChildren = new HashMap<String, Set<String>>();
+
+  String getParentAuthzObject(String authzObject) {
+    int dot = authzObject.indexOf('.');
+    if (dot > 0) {
+      return authzObject.substring(0, dot);
+    } else {
+      // no dot: the object (e.g. a database) is treated as its own parent
+      return authzObject;
+    }
+  }
+
+  void addParentChildMappings(String authzObject) {
+    String parent = getParentAuthzObject(authzObject);
+    if (parent != null) {
+      Set<String> children = authzObjChildren.get(parent);
+      if (children == null) {
+        children = new HashSet<String>();
+        authzObjChildren.put(parent, children);
+      }
+      children.add(authzObject);
+    }
+  }
+
+  void removeParentChildMappings(String authzObject) {
+    String parent = getParentAuthzObject(authzObject);
+    if (parent != null) {
+      Set<String> children = authzObjChildren.get(parent);
+      if (children != null) {
+        children.remove(authzObject);
+      }
+    } else {
+      // is parent
+      authzObjChildren.remove(authzObject);
+    }
+  }
+
+  private Map<String, FsAction> getGroupPerms(String authzObj) {
+    Map<String, FsAction> groupPerms = new HashMap<String, FsAction>();
+    if (authzObj == null) {
+      return groupPerms;
+    }
+    PrivilegeInfo privilegeInfo = privileges.get(authzObj);
+    if (privilegeInfo != null) {
+      for (Map.Entry<String, FsAction> privs : privilegeInfo
+          .getAllPermissions().entrySet()) {
+        constructAclEntry(privs.getKey(), privs.getValue(), groupPerms);
+      }
+    }
+    return groupPerms;
+  }
+
+  @Override
+  public List<AclEntry> getAcls(String authzObj) {
+    Map<String, FsAction> groupPerms = getGroupPerms(authzObj);
+    String parent = getParentAuthzObject(authzObj);
+    Map<String, FsAction> pGroupPerms = null;
+    if (parent == null) {
+      pGroupPerms = new HashMap<String, FsAction>();
+    } else {
+      pGroupPerms = getGroupPerms(parent);
+      if ((groupPerms == null)||(groupPerms.size() == 0)) {
+        groupPerms = pGroupPerms;
+      }
+    }
+    List<AclEntry> retList = new LinkedList<AclEntry>();
+    for (Map.Entry<String, FsAction> groupPerm : groupPerms.entrySet()) {
+      AclEntry.Builder builder = new AclEntry.Builder();
+      builder.setName(groupPerm.getKey());
+      builder.setType(AclEntryType.GROUP);
+      builder.setScope(AclEntryScope.ACCESS);
+      FsAction action = groupPerm.getValue();
+      FsAction pAction = pGroupPerms.get(groupPerm.getKey());
+      if (pAction != null) {
+        action = action.or(pAction); // FsAction.or returns a new value; assign it
+      }
+      if ((action == FsAction.READ) || (action == FsAction.WRITE)
+          || (action == FsAction.READ_WRITE)) {
+        action = action.or(FsAction.EXECUTE);
+      }
+      builder.setPermission(action);
+      retList.add(builder.build());
+    }
+    return retList;
+  }
+
+  private void constructAclEntry(String role, FsAction permission,
+      Map<String, FsAction> groupPerms) {
+    RoleInfo roleInfo = roles.get(role);
+    if (roleInfo != null) {
+      for (String group : roleInfo.groups) {
+        FsAction fsAction = groupPerms.get(group);
+        if (fsAction == null) {
+          fsAction = FsAction.NONE;
+        }
+        groupPerms.put(group, fsAction.or(permission));
+      }
+    }
+  }
+
+  public PrivilegeInfo getPrivilegeInfo(String authzObj) {
+    return privileges.get(authzObj);
+  }
+
+  Collection<PrivilegeInfo> getAllPrivileges() {
+    return privileges.values();
+  }
+
+  Collection<RoleInfo> getAllRoles() {
+    return roles.values();
+  }
+
+  public void delPrivilegeInfo(String authzObj) {
+    privileges.remove(authzObj);
+  }
+
+  public void addPrivilegeInfo(PrivilegeInfo privilegeInfo) {
+    privileges.put(privilegeInfo.authzObj, privilegeInfo);
+  }
+
+  public Set<String> getChildren(String authzObj) {
+    return authzObjChildren.get(authzObj);
+  }
+
+  public RoleInfo getRoleInfo(String role) {
+    return roles.get(role);
+  }
+
+  public void delRoleInfo(String role) {
+    roles.remove(role);
+  }
+
+  public void addRoleInfo(RoleInfo roleInfo) {
+    roles.put(roleInfo.role, roleInfo);
+  }
+}
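To make the ACL resolution above concrete, here is a hedged sketch of how RoleInfo and PrivilegeInfo combine in getAcls(); the role, group, and object names are invented, and same-package access is assumed for the package-private helpers:

    import java.util.List;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.FsAction;

    SentryPermissions perms = new SentryPermissions();
    // group "analysts" belongs to role "analyst"
    perms.addRoleInfo(new SentryPermissions.RoleInfo("analyst").addGroup("analysts"));
    // role "analyst" holds SELECT (mapped to r-x) on db1.tbl1
    perms.addPrivilegeInfo(new SentryPermissions.PrivilegeInfo("db1.tbl1")
        .setPermission("analyst", FsAction.READ_EXECUTE));
    perms.addParentChildMappings("db1.tbl1");
    // getAcls() resolves the role to its groups and returns a single
    // GROUP/ACCESS entry: group "analysts" with r-x
    List<AclEntry> acls = perms.getAcls("db1.tbl1");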

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java
new file mode 100644
index 0000000..9540397
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryUpdater.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SentryUpdater {
+
+  private SentryHDFSServiceClient sentryClient;
+  private final Configuration conf;
+  private final SentryAuthorizationInfo authzInfo;
+
+  private static Logger LOG = LoggerFactory.getLogger(SentryUpdater.class);
+
+  public SentryUpdater(Configuration conf, SentryAuthorizationInfo authzInfo) throws Exception {
+    this.conf = conf;
+    this.authzInfo = authzInfo;
+  }
+
+  public SentryAuthzUpdate getUpdates() {
+    if (sentryClient == null) {
+      try {
+        sentryClient = new SentryHDFSServiceClient(conf);
+      } catch (Exception e) {
+        LOG.error("Error connecting to Sentry ['{}'] !!",
+            e.getMessage());
+        sentryClient = null;
+        return null;
+      }
+    }
+    try {
+      SentryAuthzUpdate sentryUpdates = sentryClient.getAllUpdatesFrom(
+          authzInfo.getAuthzPermissions().getLastUpdatedSeqNum() + 1,
+          authzInfo.getAuthzPaths().getLastUpdatedSeqNum() + 1);
+      return sentryUpdates;
+    } catch (Exception e)  {
+      sentryClient = null;
+      LOG.error("Error receiving updates from Sentry !!", e);
+      return null;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java
new file mode 100644
index 0000000..e5af802
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPermissions.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.sentry.hdfs.SentryPermissions.PrivilegeInfo;
+import org.apache.sentry.hdfs.SentryPermissions.RoleInfo;
+import org.apache.sentry.hdfs.service.thrift.TPrivilegeChanges;
+import org.apache.sentry.hdfs.service.thrift.TRoleChanges;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class UpdateableAuthzPermissions implements AuthzPermissions, Updateable<PermissionsUpdate> {
+  private static final int MAX_UPDATES_PER_LOCK_USE = 99;
+  private volatile SentryPermissions perms = new SentryPermissions();
+  private final AtomicLong seqNum = new AtomicLong(0);
+  
+  private static Logger LOG = LoggerFactory.getLogger(UpdateableAuthzPermissions.class);
+
+  public static Map<String, FsAction> ACTION_MAPPING = new HashMap<String, FsAction>();
+  static {
+    ACTION_MAPPING.put("ALL", FsAction.ALL);
+    ACTION_MAPPING.put("*", FsAction.ALL);
+    ACTION_MAPPING.put("SELECT", FsAction.READ_EXECUTE);
+    ACTION_MAPPING.put("select", FsAction.READ_EXECUTE);
+    ACTION_MAPPING.put("INSERT", FsAction.WRITE_EXECUTE);
+    ACTION_MAPPING.put("insert", FsAction.WRITE_EXECUTE);
+  }
+
+  @Override
+  public List<AclEntry> getAcls(String authzObj) {
+    return perms.getAcls(authzObj);
+  }
+
+  @Override
+  public UpdateableAuthzPermissions updateFull(PermissionsUpdate update) {
+    UpdateableAuthzPermissions other = new UpdateableAuthzPermissions();
+    other.applyPartialUpdate(update);
+    other.seqNum.set(update.getSeqNum());
+    return other;
+  }
+
+  @Override
+  public void updatePartial(Iterable<PermissionsUpdate> updates, ReadWriteLock lock) {
+    lock.writeLock().lock();
+    try {
+      int counter = 0;
+      for (PermissionsUpdate update : updates) {
+        applyPartialUpdate(update);
+        if (++counter > MAX_UPDATES_PER_LOCK_USE) {
+          counter = 0;
+          lock.writeLock().unlock();
+          lock.writeLock().lock();
+        }
+        seqNum.set(update.getSeqNum());
+        LOG.debug("##### Updated perms seq Num [" + seqNum.get() + "]");
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+  
+
+  private void applyPartialUpdate(PermissionsUpdate update) {
+    applyPrivilegeUpdates(update);
+    applyRoleUpdates(update);
+  }
+
+  private void applyRoleUpdates(PermissionsUpdate update) {
+    for (TRoleChanges rUpdate : update.getRoleUpdates()) {
+      if (rUpdate.getRole().equals(PermissionsUpdate.ALL_ROLES)) {
+        // Request to remove group from all roles
+        String groupToRemove = rUpdate.getDelGroups().iterator().next();
+        for (RoleInfo rInfo : perms.getAllRoles()) {
+          rInfo.delGroup(groupToRemove);
+        }
+      }
+      RoleInfo rInfo = perms.getRoleInfo(rUpdate.getRole());
+      for (String group : rUpdate.getAddGroups()) {
+        if (rInfo == null) {
+          rInfo = new RoleInfo(rUpdate.getRole());
+        }
+        rInfo.addGroup(group);
+      }
+      if (rInfo != null) {
+        perms.addRoleInfo(rInfo);
+        for (String group : rUpdate.getDelGroups()) {
+          if (group.equals(PermissionsUpdate.ALL_GROUPS)) {
+            perms.delRoleInfo(rInfo.getRole());
+            break;
+          }
+          // If there are no groups to remove, rUpdate.getDelGroups() will
+          // return an empty list and this code will not be reached
+          rInfo.delGroup(group);
+        }
+      }
+    }
+  }
+
+  private void applyPrivilegeUpdates(PermissionsUpdate update) {
+    for (TPrivilegeChanges pUpdate : update.getPrivilegeUpdates()) {
+      if (pUpdate.getAuthzObj().equals(PermissionsUpdate.RENAME_PRIVS)) {
+        String newAuthzObj = pUpdate.getAddPrivileges().keySet().iterator().next();
+        String oldAuthzObj = pUpdate.getDelPrivileges().keySet().iterator().next();
+        PrivilegeInfo privilegeInfo = perms.getPrivilegeInfo(oldAuthzObj);
+        Map<String, FsAction> allPermissions = privilegeInfo.getAllPermissions();
+        perms.delPrivilegeInfo(oldAuthzObj);
+        perms.removeParentChildMappings(oldAuthzObj);
+        PrivilegeInfo newPrivilegeInfo = new PrivilegeInfo(newAuthzObj);
+        for (Map.Entry<String, FsAction> e : allPermissions.entrySet()) {
+          newPrivilegeInfo.setPermission(e.getKey(), e.getValue());
+        }
+        perms.addPrivilegeInfo(newPrivilegeInfo);
+        perms.addParentChildMappings(newAuthzObj);
+        return;
+      }
+      if (pUpdate.getAuthzObj().equals(PermissionsUpdate.ALL_AUTHZ_OBJ)) {
+        // Request to remove role from all Privileges
+        String roleToRemove = pUpdate.getDelPrivileges().keySet().iterator()
+            .next();
+        for (PrivilegeInfo pInfo : perms.getAllPrivileges()) {
+          pInfo.removePermission(roleToRemove);
+        }
+      }
+      PrivilegeInfo pInfo = perms.getPrivilegeInfo(pUpdate.getAuthzObj());
+      for (Map.Entry<String, String> aMap : pUpdate.getAddPrivileges().entrySet()) {
+        if (pInfo == null) {
+          pInfo = new PrivilegeInfo(pUpdate.getAuthzObj());
+        }
+        FsAction fsAction = pInfo.getPermission(aMap.getKey());
+        if (fsAction == null) {
+          fsAction = getFAction(aMap.getValue());
+        } else {
+          fsAction = fsAction.or(getFAction(aMap.getValue()));
+        }
+        pInfo.setPermission(aMap.getKey(), fsAction);
+      }
+      if (pInfo != null) {
+        perms.addPrivilegeInfo(pInfo);
+        perms.addParentChildMappings(pUpdate.getAuthzObj());
+        for (Map.Entry<String, String> dMap : pUpdate.getDelPrivileges().entrySet()) {
+          if (dMap.getKey().equals(PermissionsUpdate.ALL_ROLES)) {
+            // Remove all privileges
+            perms.delPrivilegeInfo(pUpdate.getAuthzObj());
+            perms.removeParentChildMappings(pUpdate.getAuthzObj());
+            break;
+          }
+          List<PrivilegeInfo> parentAndChild = new LinkedList<PrivilegeInfo>();
+          parentAndChild.add(pInfo);
+          Set<String> children = perms.getChildren(pInfo.getAuthzObj());
+          if (children != null) {
+            for (String child : children) {
+              parentAndChild.add(perms.getPrivilegeInfo(child));
+            }
+          }
+          // recursive revoke
+          for (PrivilegeInfo pInfo2 : parentAndChild) {
+            FsAction fsAction = pInfo2.getPermission(dMap.getKey());
+            if (fsAction != null) {
+              fsAction = fsAction.and(getFAction(dMap.getValue()).not());
+              if (FsAction.NONE == fsAction) {
+                pInfo2.removePermission(dMap.getKey());
+              } else {
+                pInfo2.setPermission(dMap.getKey(), fsAction);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  static FsAction getFAction(String sentryPriv) {
+    String[] strPrivs = sentryPriv.trim().split(",");
+    FsAction retVal = FsAction.NONE;
+    for (String strPriv : strPrivs) {
+      retVal = retVal.or(ACTION_MAPPING.get(strPriv.toUpperCase()));
+    }
+    return retVal;
+  }
+
+  @Override
+  public long getLastUpdatedSeqNum() {
+    return seqNum.get();
+  }
+
+  @Override
+  public PermissionsUpdate createFullImageUpdate(long currSeqNum) {
+    PermissionsUpdate retVal = new PermissionsUpdate(currSeqNum, true);
+    for (PrivilegeInfo pInfo : perms.getAllPrivileges()) {
+      TPrivilegeChanges pUpdate = retVal.addPrivilegeUpdate(pInfo.getAuthzObj());
+      for (Map.Entry<String, FsAction> ent : pInfo.getAllPermissions().entrySet()) {
+        pUpdate.putToAddPrivileges(ent.getKey(), ent.getValue().SYMBOL);
+      }
+    }
+    for (RoleInfo rInfo : perms.getAllRoles()) {
+      TRoleChanges rUpdate = retVal.addRoleUpdate(rInfo.getRole());
+      for (String group : rInfo.getAllGroups()) {
+        rUpdate.addToAddGroups(group);
+      }
+    }
+    return retVal;
+  }
+
+  
+}
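As a quick reference, getFAction() above folds a comma-separated Sentry privilege string into a single FsAction via ACTION_MAPPING; from the table as committed:

    // getFAction("select")        -> FsAction.READ_EXECUTE  (r-x)
    // getFAction("insert")        -> FsAction.WRITE_EXECUTE (-wx)
    // getFAction("select,insert") -> FsAction.ALL           (r-x | -wx = rwx)
    // getFAction("*")             -> FsAction.ALL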

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/MockSentryAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/MockSentryAuthorizationProvider.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/MockSentryAuthorizationProvider.java
new file mode 100644
index 0000000..2085b52
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/MockSentryAuthorizationProvider.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+public class MockSentryAuthorizationProvider extends
+    SentryAuthorizationProvider {
+
+  public MockSentryAuthorizationProvider() {
+    super(new SentryAuthorizationInfoX());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java
new file mode 100644
index 0000000..7a1539b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/SentryAuthorizationInfoX.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+
+public class SentryAuthorizationInfoX extends SentryAuthorizationInfo {
+
+  public SentryAuthorizationInfoX() {
+    super();
+  }
+
+  @Override
+  public void run() {
+    
+  }
+
+  @Override
+  public void start() {
+
+  }
+
+  @Override
+  public void stop() {
+
+  }
+
+  @Override
+  public boolean isStale() {
+    return false;
+  }
+
+  private static final String[] MANAGED = {"user", "authz"};
+  private static final String[] AUTHZ_OBJ = {"user", "authz", "obj"};
+
+  private boolean hasPrefix(String[] prefix, String[] pathElement) {
+    int i = 0;
+    for (; i < prefix.length && i < pathElement.length; i ++) {
+      if (!prefix[i].equals(pathElement[i])) {
+        return false;
+      }
+    }    
+    return (i == prefix.length);
+  }
+  
+  @Override
+  public boolean isManaged(String[] pathElements) {
+    return hasPrefix(MANAGED, pathElements);
+  }
+
+  @Override
+  public boolean doesBelongToAuthzObject(String[] pathElements) {
+    return hasPrefix(AUTHZ_OBJ, pathElements);
+  }
+
+  @Override
+  public List<AclEntry> getAclEntries(String[] pathElements) {
+    AclEntry acl = new AclEntry.Builder().setType(AclEntryType.USER).
+        setPermission(FsAction.ALL).setName("user-authz").
+        setScope(AclEntryScope.ACCESS).build();
+    return Arrays.asList(acl);
+  }
+}
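The hasPrefix() helper above matches element-wise and then requires the whole prefix to have been consumed, so a path shorter than the prefix never matches:

    // hasPrefix({"user","authz"}, {"user","authz","obj","x"}) -> true
    // hasPrefix({"user","authz"}, {"user"})                   -> false (path too short)
    // hasPrefix({"user","authz"}, {"user","xxx"})             -> false (element mismatch)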

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java
new file mode 100644
index 0000000..b766a8f
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/java/org/apache/sentry/hdfs/TestSentryAuthorizationProvider.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.LinkedHashSet;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+
+public class TestSentryAuthorizationProvider {
+  private MiniDFSCluster miniDFS;
+  private UserGroupInformation admin;
+  
+  @Before
+  public void setUp() throws Exception {
+    admin = UserGroupInformation.createUserForTesting(
+        System.getProperty("user.name"), new String[] { "supergroup" });
+    admin.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
+        Configuration conf = new HdfsConfiguration();
+        conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
+        conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
+            MockSentryAuthorizationProvider.class.getName());
+        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+        miniDFS = new MiniDFSCluster.Builder(conf).build();
+        return null;
+      }
+    });
+  }
+
+  @After
+  public void cleanUp() throws IOException {
+    if (miniDFS != null) {
+      miniDFS.shutdown();
+    }
+  }
+
+  @Test
+  public void testProvider() throws Exception {
+    admin.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        String sysUser = UserGroupInformation.getCurrentUser().getShortUserName();
+        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+
+        List<AclEntry> baseAclList = new ArrayList<AclEntry>();
+        AclEntry.Builder builder = new AclEntry.Builder();
+        baseAclList.add(builder.setType(AclEntryType.USER)
+            .setScope(AclEntryScope.ACCESS).build());
+        baseAclList.add(builder.setType(AclEntryType.GROUP)
+            .setScope(AclEntryScope.ACCESS).build());
+        baseAclList.add(builder.setType(AclEntryType.OTHER)
+            .setScope(AclEntryScope.ACCESS).build());
+        Path path1 = new Path("/user/authz/obj/xxx");
+        fs.mkdirs(path1);
+        fs.setAcl(path1, baseAclList);
+
+        fs.mkdirs(new Path("/user/authz/xxx"));
+        fs.mkdirs(new Path("/user/xxx"));
+
+        // root
+        Path path = new Path("/");
+        Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
+        Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
+        Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
+        Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());
+
+        // dir before prefixes
+        path = new Path("/user");
+        Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
+        Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
+        Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
+        Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());
+
+        // prefix dir
+        path = new Path("/user/authz");
+        Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
+        Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
+        Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
+        Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());
+
+        // dir inside of prefix, no obj
+        path = new Path("/user/authz/xxx");
+        FileStatus status = fs.getFileStatus(path);
+        Assert.assertEquals(sysUser, status.getOwner());
+        Assert.assertEquals("supergroup", status.getGroup());
+        Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
+        Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());
+
+        // dir inside of prefix, obj
+        path = new Path("/user/authz/obj");
+        Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
+        Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
+        Assert.assertEquals(new FsPermission((short) 0770), fs.getFileStatus(path).getPermission());
+        Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());
+
+        List<AclEntry> acls = new ArrayList<AclEntry>();
+        acls.add(new AclEntry.Builder().setName(sysUser).setType(AclEntryType.USER).setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
+        acls.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP).setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
+        acls.add(new AclEntry.Builder().setName(null).setType(AclEntryType.OTHER).setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
+        acls.add(new AclEntry.Builder().setName("user-authz").setType(AclEntryType.USER).setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
+        Assert.assertEquals(new LinkedHashSet<AclEntry>(acls), new LinkedHashSet<AclEntry>(fs.getAclStatus(path).getEntries()));
+
+        // dir inside of prefix, inside of obj
+        path = new Path("/user/authz/obj/xxx");
+        Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
+        Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
+        Assert.assertEquals(new FsPermission((short) 0770), fs.getFileStatus(path).getPermission());
+        Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());
+        
+        Path path2 = new Path("/user/authz/obj/path2");
+        fs.mkdirs(path2);
+        fs.setAcl(path2, baseAclList);
+
+        // dir outside of prefix
+        path = new Path("/user/xxx");
+        Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
+        Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
+        Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
+        Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());
+        return null;
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml
new file mode 100644
index 0000000..511bfdd
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>sentry.hdfs-plugin.path-prefixes</name>
+    <value>/user/hive/dw</value>
+  </property>
+  <property>
+    <name>sentry.hdfs-plugin.sentry-uri</name>
+    <value>thrift://localhost:1234</value>
+  </property>
+  <property>
+    <name>sentry.hdfs-plugin.stale-threshold.ms</name>
+    <value>-1</value>
+  </property>
+</configuration>
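
The three properties above are ordinary Hadoop configuration keys. As a minimal
sketch (not part of the patch; the class name is invented, and the real plugin
presumably wires these keys through its own constants), they can be read back
with the stock Configuration API:

import org.apache.hadoop.conf.Configuration;

public class HdfsSentryConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.addResource("hdfs-sentry.xml"); // resolved from the classpath
    // prefixes under which Sentry manages permissions, e.g. /user/hive/dw
    String[] prefixes = conf.getTrimmedStrings("sentry.hdfs-plugin.path-prefixes");
    String sentryUri = conf.get("sentry.hdfs-plugin.sentry-uri");
    // the test resource sets -1, presumably to disable the staleness check
    long staleMs = conf.getLong("sentry.hdfs-plugin.stale-threshold.ms", -1L);
    System.out.println(prefixes.length + " prefix(es), uri=" + sentryUri
        + ", staleThresholdMs=" + staleMs);
  }
}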

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/.gitignore
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/.gitignore b/sentry-hdfs/sentry-hdfs-service/.gitignore
new file mode 100644
index 0000000..91ad75b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/.gitignore
@@ -0,0 +1,18 @@
+*.class
+target/
+.classpath
+.project
+.settings
+.metadata
+.idea/
+*.iml
+derby.log
+datanucleus.log
+sentry-core/sentry-core-common/src/gen
+**/TempStatsStore/
+# Package Files #
+*.jar
+*.war
+*.ear
+test-output/
+maven-repo/

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/pom.xml b/sentry-hdfs/sentry-hdfs-service/pom.xml
new file mode 100644
index 0000000..365380e
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/pom.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-service</artifactId>
+  <name>Sentry HDFS service</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.shiro</groupId>
+      <artifactId>shiro-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-provider-db</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>${hive.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-shims</artifactId>
+      <version>${hive.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.thrift</groupId>
+      <artifactId>libfb303</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.thrift</groupId>
+      <artifactId>libthrift</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>ant-contrib</groupId>
+      <artifactId>ant-contrib</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <version>${hive.version}</version>
+    </dependency>
+  </dependencies>
+
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
new file mode 100644
index 0000000..e7677f2
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Implementation of {@link MetastoreClient}
+ *
+ */
+public class ExtendedMetastoreClient implements MetastoreClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ExtendedMetastoreClient.class);
+
+  private volatile HiveMetaStoreClient client;
+  private final HiveConf hiveConf;
+  public ExtendedMetastoreClient(HiveConf hiveConf) {
+    this.hiveConf = hiveConf;
+  }
+
+  @Override
+  public List<Database> getAllDatabases() {
+    List<Database> retList = new ArrayList<Database>();
+    HiveMetaStoreClient client = getClient();
+    if (client != null) {
+      try {
+        for (String dbName : client.getAllDatabases()) {
+          retList.add(client.getDatabase(dbName));
+        }
+      } catch (Exception e) {
+        LOG.error("Could not get All Databases !!", e);
+      }
+    }
+    return retList;
+  }
+
+  @Override
+  public List<Table> getAllTablesOfDatabase(Database db) {
+    List<Table> retList = new ArrayList<Table>();
+    HiveMetaStoreClient client = getClient();
+    if (client != null) {
+      try {
+        for (String tblName : client.getAllTables(db.getName())) {
+          retList.add(client.getTable(db.getName(), tblName));
+        }
+      } catch (Exception e) {
+        LOG.error(String.format(
+            "Could not get Tables for '%s' !!", db.getName()), e);
+      }
+    }
+    return retList;
+  }
+
+  @Override
+  public List<Partition> listAllPartitions(Database db, Table tbl) {
+    HiveMetaStoreClient client = getClient();
+    if (client != null) {
+      try {
+        return client.listPartitions(db.getName(), tbl.getTableName(), Short.MAX_VALUE);
+      } catch (Exception e) {
+        LOG.error(String.format(
+            "Could not get partitions for '%s'.'%s' !!", db.getName(),
+            tbl.getTableName()), e);
+      }
+    }
+    return new LinkedList<Partition>();
+  }
+
+  private HiveMetaStoreClient getClient() {
+    if (client == null) {
+      try {
+        client = new HiveMetaStoreClient(hiveConf);
+        return client;
+      } catch (MetaException e) {
+        client = null;
+        LOG.error("Could not create metastore client !!", e);
+        return null;
+      }
+    } else {
+      return client;
+    }
+  }
+}
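
A quick usage sketch for the client above (not part of the patch; assumes the
caller sits in org.apache.sentry.hdfs and that the ambient HiveConf points at a
reachable metastore). It walks every database, table, and partition, which is
essentially the enumeration the initial authz image is built from:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class MetastoreWalkSketch {
  public static void main(String[] args) {
    MetastoreClient client = new ExtendedMetastoreClient(new HiveConf());
    for (Database db : client.getAllDatabases()) {
      for (Table tbl : client.getAllTablesOfDatabase(db)) {
        // each partition location is one more path in the authz picture
        for (Partition part : client.listAllPartitions(db, tbl)) {
          System.out.println(db.getName() + "." + tbl.getTableName()
              + " -> " + part.getSd().getLocation());
        }
      }
    }
  }
}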

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
new file mode 100644
index 0000000..9a81e3a
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
@@ -0,0 +1,257 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Plugin implementation of {@link SentryMetastoreListenerPlugin} that hooks
+ * into the sites in the {@link MetaStorePreEventListener} that deal with
+ * creation, update, and deletion of paths.
+ */
+public class MetastorePlugin extends SentryMetastoreListenerPlugin {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(MetastorePlugin.class);
+
+  private final Configuration conf;
+  private SentryHDFSServiceClient sentryClient;
+  private UpdateableAuthzPaths authzPaths;
+  private Lock notificationLock;
+
+  // Initialized to some value > 1 so that the first update notification
+  // will trigger a full image fetch.
+  private final AtomicLong seqNum = new AtomicLong(5);
+  private volatile long lastSentSeqNum = -1;
+  private final ExecutorService threadPool;
+
+  static class ProxyHMSHandler extends HMSHandler {
+    public ProxyHMSHandler(String name, HiveConf conf) throws MetaException {
+      super(name, conf);
+    }
+    @Override
+    public String startFunction(String function, String extraLogInfo) {
+      return function;
+    }
+  }
+
+  public MetastorePlugin(Configuration conf) {
+    this.notificationLock = new ReentrantLock();
+    this.conf = new HiveConf((HiveConf)conf);
+    this.conf.unset(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname);
+    this.conf.unset(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname);
+    this.conf.unset(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname);
+    this.conf.unset(HiveConf.ConfVars.METASTOREURIS.varname);
+    try {
+      this.authzPaths = createInitialUpdate(new ProxyHMSHandler("sentry.hdfs", (HiveConf)this.conf));
+    } catch (Exception e1) {
+      LOGGER.error("Could not create Initial AuthzPaths or HMSHandler !!", e1);
+      throw new RuntimeException(e1);
+    }
+    try {
+      sentryClient = new SentryHDFSServiceClient(conf);
+    } catch (Exception e) {
+      sentryClient = null;
+      LOGGER.error("Could not connect to Sentry HDFS Service !!", e);
+    }
+    ScheduledExecutorService threadPool = Executors.newScheduledThreadPool(1);
+    threadPool.scheduleWithFixedDelay(new Runnable() {
+      @Override
+      public void run() {
+        notificationLock.lock();
+        try {
+          long lastSeenHMSPathSeqNum =
+              MetastorePlugin.this.getClient().getLastSeenHMSPathSeqNum();
+          if (lastSeenHMSPathSeqNum != lastSentSeqNum) {
+            LOGGER.warn("Sentry not in sync with HMS [" + lastSeenHMSPathSeqNum + ", " + lastSentSeqNum + "]");
+            PathsUpdate fullImageUpdate =
+                MetastorePlugin.this.authzPaths.createFullImageUpdate(
+                    lastSentSeqNum);
+            LOGGER.warn("Sentry not in sync with HMS !!");
+            notifySentryNoLock(fullImageUpdate, false);
+          }
+        } catch (Exception e) {
+          sentryClient = null;
+          LOGGER.error("Error talking to Sentry HDFS Service !!", e);
+        } finally {
+          notificationLock.unlock();
+        }
+      }
+    }, this.conf.getLong(ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_MS,
+        ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_DEFAULT), 1000,
+        TimeUnit.MILLISECONDS);
+    this.threadPool = threadPool;
+  }
+
+  private UpdateableAuthzPaths createInitialUpdate(IHMSHandler hmsHandler) throws Exception {
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(new String[] {"/"});
+    PathsUpdate tempUpdate = new PathsUpdate(-1, false);
+    List<String> allDbStr = hmsHandler.get_all_databases();
+    for (String dbName : allDbStr) {
+      Database db = hmsHandler.get_database(dbName);
+      tempUpdate.newPathChange(db.getName()).addToAddPaths(
+          PathsUpdate.cleanPath(db.getLocationUri()));
+      List<String> allTblStr = hmsHandler.get_all_tables(db.getName());
+      for (String tblName : allTblStr) {
+        Table tbl = hmsHandler.get_table(db.getName(), tblName);
+        TPathChanges tblPathChange = tempUpdate.newPathChange(tbl
+            .getDbName() + "." + tbl.getTableName());
+        List<Partition> tblParts =
+            hmsHandler.get_partitions(db.getName(), tbl.getTableName(), (short) -1);
+        tblPathChange.addToAddPaths(PathsUpdate.cleanPath(tbl.getSd()
+            .getLocation() == null ? db.getLocationUri() : tbl
+            .getSd().getLocation()));
+        for (Partition part : tblParts) {
+          tblPathChange.addToAddPaths(PathsUpdate.cleanPath(part.getSd()
+              .getLocation()));
+        }
+      }
+    }
+    authzPaths.updatePartial(Lists.newArrayList(tempUpdate),
+        new ReentrantReadWriteLock());
+    return authzPaths;
+  }
+
+  @Override
+  public void addPath(String authzObj, String path) {
+    LOGGER.debug("#### HMS Path Update ["
+        + "OP : addPath, "
+        + "authzObj : " + authzObj + ", "
+        + "path : " + path + "]");
+    PathsUpdate update = createHMSUpdate();
+    update.newPathChange(authzObj).addToAddPaths(PathsUpdate.cleanPath(path));
+    notifySentry(update, true);
+  }
+
+  @Override
+  public void removeAllPaths(String authzObj, List<String> childObjects) {
+    LOGGER.debug("#### HMS Path Update ["
+        + "OP : removeAllPaths, "
+        + "authzObj : " + authzObj + ", "
+        + "childObjs : " + (childObjects == null ? "[]" : childObjects) + "]");
+    PathsUpdate update = createHMSUpdate();
+    if (childObjects != null) {
+      for (String childObj : childObjects) {
+        update.newPathChange(authzObj + "." + childObj).addToDelPaths(
+            Lists.newArrayList(PathsUpdate.ALL_PATHS));
+      }
+    }
+    update.newPathChange(authzObj).addToDelPaths(
+        Lists.newArrayList(PathsUpdate.ALL_PATHS));
+    notifySentry(update, true);
+  }
+
+  @Override
+  public void removePath(String authzObj, String path) {
+    if ("*".equals(path)) {
+      removeAllPaths(authzObj, null);
+    } else {
+      LOGGER.debug("#### HMS Path Update ["
+          + "OP : removePath, "
+          + "authzObj : " + authzObj + ", "
+          + "path : " + path + "]");
+      PathsUpdate update = createHMSUpdate();
+      update.newPathChange(authzObj).addToDelPaths(PathsUpdate.cleanPath(path));
+      notifySentry(update, true);
+    }
+  }
+
+  @Override
+  public void renameAuthzObject(String oldName, String oldPath, String newName,
+      String newPath) {
+    PathsUpdate update = createHMSUpdate();
+    LOGGER.debug("#### HMS Path Update ["
+        + "OP : renameAuthzObject, "
+        + "oldName : " + oldName + ","
+        + "newPath : " + oldPath + ","
+        + "newName : " + newName + ","
+        + "newPath : " + newPath + "]");
+    update.newPathChange(newName).addToAddPaths(PathsUpdate.cleanPath(newPath));
+    update.newPathChange(oldName).addToDelPaths(PathsUpdate.cleanPath(oldPath));
+    notifySentry(update, true);
+  }
+
+  private SentryHDFSServiceClient getClient() {
+    if (sentryClient == null) {
+      try {
+        sentryClient = new SentryHDFSServiceClient(conf);
+      } catch (IOException e) {
+        sentryClient = null;
+        LOGGER.error("Could not connect to Sentry HDFS Service !!", e);
+      }
+    }
+    return sentryClient;
+  }
+
+  private PathsUpdate createHMSUpdate() {
+    PathsUpdate update = new PathsUpdate(seqNum.incrementAndGet(), false);
+    LOGGER.debug("#### HMS Path Update SeqNum : [" + seqNum.get() + "]");
+    return update;
+  }
+
+  private void notifySentryNoLock(PathsUpdate update, boolean applyLocal) {
+    if (applyLocal) {
+      authzPaths.updatePartial(Lists.newArrayList(update), new ReentrantReadWriteLock());
+    }
+    try {
+      getClient().notifyHMSUpdate(update);
+    } catch (Exception e) {
+      LOGGER.error("Could not send update to Sentry HDFS Service !!", e);
+    } finally {
+      lastSentSeqNum = update.getSeqNum();
+      LOGGER.debug("#### HMS Path Last update sent : [" + lastSentSeqNum + "]");
+    }
+  }
+
+  private void notifySentry(PathsUpdate update, boolean applyLocal) {
+    notificationLock.lock();
+    try {
+      notifySentryNoLock(update, applyLocal);
+    } finally {
+      notificationLock.unlock();
+    }
+  }
+
+}
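
To make the notification shape concrete: every listener callback above boils
down to a PathsUpdate carrying one TPathChanges per authorizable object. A
sketch of roughly what addPath("db1.tbl1", ...) builds (same package assumed;
the sequence number and warehouse URI are made up):

public class PathsUpdateSketch {
  public static void main(String[] args) {
    long seqNum = 42L; // in the plugin this comes from seqNum.incrementAndGet()
    PathsUpdate update = new PathsUpdate(seqNum, false); // false => partial, not a full image
    // one TPathChanges keyed by the authz object, with the normalized location
    update.newPathChange("db1.tbl1").addToAddPaths(
        PathsUpdate.cleanPath("hdfs://nn:8020/user/hive/warehouse/db1.db/tbl1"));
    // notifySentry(update, true) would apply this to the local authzPaths and
    // forward it to the Sentry HDFS service
  }
}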

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
new file mode 100644
index 0000000..cc849b9
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.hdfs;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
+import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
+import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SentryHDFSServiceProcessor implements SentryHDFSService.Iface {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceProcessor.class);
+
+  @Override
+  public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pathSeqNum)
+      throws TException {
+    TAuthzUpdateResponse retVal = new TAuthzUpdateResponse();
+    retVal.setAuthzPathUpdate(new LinkedList<TPathsUpdate>());
+    retVal.setAuthzPermUpdate(new LinkedList<TPermissionsUpdate>());
+    if (SentryPlugin.instance != null) {
+      List<PermissionsUpdate> permUpdates = SentryPlugin.instance.getAllPermsUpdatesFrom(permSeqNum);
+      List<PathsUpdate> pathUpdates = SentryPlugin.instance.getAllPathsUpdatesFrom(pathSeqNum);
+      try {
+        for (PathsUpdate update : pathUpdates) {
+          if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("### Sending PATH preUpdate seq [" + update.getSeqNum() + "] ###");
+            LOGGER.debug("### Sending PATH preUpdate [" + update.toThrift() + "] ###");
+          }
+          retVal.getAuthzPathUpdate().add(update.toThrift());
+        }
+        for (PermissionsUpdate update : permUpdates) {
+          if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("### Sending PERM preUpdate seq [" + update.getSeqNum() + "] ###");
+            LOGGER.debug("### Sending PERM preUpdate [" + update.toThrift() + "] ###");
+          }
+          retVal.getAuthzPermUpdate().add(update.toThrift());
+        }
+        if (LOGGER.isDebugEnabled()) {
+          StringBuilder permSeq = new StringBuilder("<");
+          for (PermissionsUpdate permUpdate : permUpdates) {
+            permSeq.append(permUpdate.getSeqNum()).append(",");
+          }
+          permSeq.append(">");
+          StringBuilder pathSeq = new StringBuilder("<");
+          for (PathsUpdate pathUpdate : pathUpdates) {
+            pathSeq.append(pathUpdate.getSeqNum()).append(",");
+          }
+          pathSeq.append(">");
+          LOGGER.debug("#### Updates requested from HDFS ["
+              + "permReq=" + permSeqNum + ", permResp=" + permSeq + "] "
+              + "[pathReq=" + pathSeqNum + ", pathResp=" + pathSeq + "]");
+        }
+      } catch (Exception e) {
+        LOGGER.error("Error Sending updates to downstream Cache", e);
+        throw new TException(e);
+      }
+    } else {
+      LOGGER.error("SentryPlugin not initialized yet !!");
+    }
+    
+    return retVal;
+  }
+
+  @Override
+  public void handle_hms_notification(TPathsUpdate update) throws TException {
+    try {
+      PathsUpdate hmsUpdate = new PathsUpdate(update);
+      if (SentryPlugin.instance != null) {
+        SentryPlugin.instance.handlePathUpdateNotification(hmsUpdate);
+        LOGGER.debug("Authz Paths update [" + hmsUpdate.getSeqNum() + "]..");
+      } else {
+        LOGGER.error("SentryPlugin not initialized yet !!");
+      }
+    } catch (Exception e) {
+      LOGGER.error("Error handling notification from HMS", e);
+      throw new TException(e);
+    }
+  }
+
+  @Override
+  public long check_hms_seq_num(long pathSeqNum) throws TException {
+    return SentryPlugin.instance.getLastSeenHMSPathSeqNum();
+  }
+
+  /**
+   * Not implemented for the time being.
+   */
+  @Override
+  public Map<String, List<String>> get_all_related_paths(String arg0,
+      boolean arg1) throws TException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+}
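
For a feel of the wire protocol this processor serves, here is a bare-bones
polling client sketch: plain socket, no SASL/Kerberos, illustrative host and
port, same package as SentryHDFSServiceClient assumed. In practice
SentryHDFSServiceClient wraps this handshaking:

import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TMultiplexedProtocol;
import org.apache.thrift.transport.TSocket;

public class PollUpdatesSketch {
  public static void main(String[] args) throws Exception {
    TSocket socket = new TSocket("sentry-host", 8038); // host/port illustrative
    socket.open();
    // the service is registered on a multiplexed processor under its service name
    TMultiplexedProtocol protocol = new TMultiplexedProtocol(
        new TBinaryProtocol(socket),
        SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME);
    SentryHDFSService.Client client = new SentryHDFSService.Client(protocol);
    // everything at or after perm seq 1 / path seq 1
    TAuthzUpdateResponse resp = client.get_all_authz_updates_from(1, 1);
    System.out.println("perm updates: " + resp.getAuthzPermUpdate().size()
        + ", path updates: " + resp.getAuthzPathUpdate().size());
    socket.close();
  }
}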

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
new file mode 100644
index 0000000..d35de75
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.hdfs;
+
+import java.net.Socket;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Iface;
+import org.apache.sentry.provider.db.log.util.CommandUtil;
+import org.apache.sentry.service.thrift.ProcessorFactory;
+import org.apache.thrift.TException;
+import org.apache.thrift.TMultiplexedProcessor;
+import org.apache.thrift.TProcessor;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSaslServerTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SentryHDFSServiceProcessorFactory extends ProcessorFactory{
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceProcessorFactory.class);
+
+  static class ProcessorWrapper extends SentryHDFSService.Processor<SentryHDFSService.Iface> {
+
+    public ProcessorWrapper(Iface iface) {
+      super(iface);
+    }
+    @Override
+    public boolean process(TProtocol in, TProtocol out) throws TException {
+      setIpAddress(in);
+      setImpersonator(in);
+      return super.process(in, out);
+    }
+
+    private void setImpersonator(final TProtocol in) {
+      TTransport transport = in.getTransport();
+      if (transport instanceof TSaslServerTransport) {
+        String impersonator = ((TSaslServerTransport) transport).getSaslServer().getAuthorizationID();
+        CommandUtil.setImpersonator(impersonator);
+      }
+    }
+
+    private void setIpAddress(final TProtocol in) {
+      TTransport transport = in.getTransport();
+      TSocket tSocket = getUnderlyingSocketFromTransport(transport);
+      if (tSocket != null) {
+        setIpAddress(tSocket.getSocket());
+      } else {
+        LOGGER.warn("Unknown Transport, cannot determine ipAddress");
+      }
+    }
+
+    private void setIpAddress(Socket socket) {
+      CommandUtil.setIpAddress(socket.getInetAddress().toString());
+    }
+
+    private TSocket getUnderlyingSocketFromTransport(TTransport transport) {
+      if (transport != null) {
+        if (transport instanceof TSaslServerTransport) {
+          transport = ((TSaslServerTransport) transport).getUnderlyingTransport();
+        } else if (transport instanceof TSaslClientTransport) {
+          transport = ((TSaslClientTransport) transport).getUnderlyingTransport();
+        } else {
+          if (!(transport instanceof TSocket)) {
+            LOGGER.warn("Transport class [" + transport.getClass().getName() + "] is not of type TSocket");
+            return null;
+          }
+        }
+        return (TSocket) transport;
+      }
+      return null;
+    }
+  }
+
+  public SentryHDFSServiceProcessorFactory(Configuration conf) {
+    super(conf);
+  }
+
+
+  public boolean register(TMultiplexedProcessor multiplexedProcessor) throws Exception {
+    SentryHDFSServiceProcessor sentryServiceHandler =
+        new SentryHDFSServiceProcessor();
+    TProcessor processor = new ProcessorWrapper(sentryServiceHandler);
+    multiplexedProcessor.registerProcessor(
+        SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME, processor);
+    return true;
+  }
+}
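
The server side of that same handshake, in miniature (sketch only, same
package assumed; Sentry's service bootstrap, not shown in this hunk, is what
actually owns the Thrift server):

import org.apache.hadoop.conf.Configuration;
import org.apache.thrift.TMultiplexedProcessor;

public class RegisterSketch {
  public static void main(String[] args) throws Exception {
    TMultiplexedProcessor multiplexed = new TMultiplexedProcessor();
    // installs a ProcessorWrapper under SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME
    new SentryHDFSServiceProcessorFactory(new Configuration()).register(multiplexed);
    // 'multiplexed' would then back the Thrift server, next to the policy
    // service processor registered under its own name
  }
}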

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
new file mode 100644
index 0000000..55b7697
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
+import org.apache.sentry.hdfs.UpdateForwarder.ExternalImageRetriever;
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPrivilegeChanges;
+import org.apache.sentry.hdfs.service.thrift.TRoleChanges;
+import org.apache.sentry.provider.db.SentryPolicyStorePlugin;
+import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException;
+import org.apache.sentry.provider.db.service.persistent.SentryStore;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleGrantPrivilegeRequest;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivilegeRequest;
+import org.apache.sentry.provider.db.service.thrift.TDropPrivilegesRequest;
+import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleRequest;
+import org.apache.sentry.provider.db.service.thrift.TRenamePrivilegesRequest;
+import org.apache.sentry.provider.db.service.thrift.TSentryAuthorizable;
+import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
+import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Strings;
+
+public class SentryPlugin implements SentryPolicyStorePlugin {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryPlugin.class);
+
+  public static volatile SentryPlugin instance;
+
+  static class PermImageRetriever implements ExternalImageRetriever<PermissionsUpdate> {
+
+    private final SentryStore sentryStore;
+
+    public PermImageRetriever(SentryStore sentryStore) {
+      this.sentryStore = sentryStore;
+    }
+
+    @Override
+    public PermissionsUpdate retrieveFullImage(long currSeqNum) {
+      Map<String, HashMap<String, String>> privilegeImage = sentryStore.retrieveFullPrivilegeImage();
+      Map<String, LinkedList<String>> roleImage = sentryStore.retrieveFullRoleImage();
+      
+      TPermissionsUpdate tPermUpdate = new TPermissionsUpdate(true, currSeqNum,
+          new HashMap<String, TPrivilegeChanges>(),
+          new HashMap<String, TRoleChanges>());
+      for (Map.Entry<String, HashMap<String, String>> privEnt : privilegeImage.entrySet()) {
+        String authzObj = privEnt.getKey();
+        HashMap<String,String> privs = privEnt.getValue();
+        tPermUpdate.putToPrivilegeChanges(authzObj, new TPrivilegeChanges(
+            authzObj, privs, new HashMap<String, String>()));
+      }
+      for (Map.Entry<String, LinkedList<String>> privEnt : roleImage.entrySet()) {
+        String role = privEnt.getKey();
+        LinkedList<String> groups = privEnt.getValue();
+        tPermUpdate.putToRoleChanges(role, new TRoleChanges(role, groups, new LinkedList<String>()));
+      }
+      PermissionsUpdate permissionsUpdate = new PermissionsUpdate(tPermUpdate);
+      permissionsUpdate.setSeqNum(currSeqNum);
+      return permissionsUpdate;
+    }
+    
+  }
+
+  private UpdateForwarder<PathsUpdate> pathsUpdater;
+  private UpdateForwarder<PermissionsUpdate> permsUpdater;
+  private final AtomicLong permSeqNum = new AtomicLong(5);
+  private PermImageRetriever permImageRetriever;
+
+  long getLastSeenHMSPathSeqNum() {
+    return pathsUpdater.getLastSeen();
+  }
+
+  @Override
+  public void initialize(Configuration conf, SentryStore sentryStore) throws SentryPluginException {
+    final String[] pathPrefixes = conf
+        .getStrings(ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES,
+            ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT);
+    final int initUpdateRetryDelayMs =
+        conf.getInt(ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_MS,
+            ServerConfig.SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_DEFAULT);
+    pathsUpdater = new UpdateForwarder<PathsUpdate>(new UpdateableAuthzPaths(
+        pathPrefixes), null, 100, initUpdateRetryDelayMs);
+    permImageRetriever = new PermImageRetriever(sentryStore);
+    permsUpdater = new UpdateForwarder<PermissionsUpdate>(
+        new UpdateablePermissions(permImageRetriever), permImageRetriever,
+        100, initUpdateRetryDelayMs);
+    LOGGER.info("Sentry HDFS plugin initialized !!");
+    instance = this;
+  }
+
+  public List<PathsUpdate> getAllPathsUpdatesFrom(long pathSeqNum) {
+    return pathsUpdater.getAllUpdatesFrom(pathSeqNum);
+  }
+
+  public List<PermissionsUpdate> getAllPermsUpdatesFrom(long permSeqNum) {
+    return permsUpdater.getAllUpdatesFrom(permSeqNum);
+  }
+
+  public void handlePathUpdateNotification(PathsUpdate update) {
+    pathsUpdater.handleUpdateNotification(update);
+    LOGGER.debug("Recieved Authz Path update [" + update.getSeqNum() + "]..");
+  }
+
+  @Override
+  public void onAlterSentryRoleAddGroups(
+      TAlterSentryRoleAddGroupsRequest request) throws SentryPluginException {
+    PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+    TRoleChanges rUpdate = update.addRoleUpdate(request.getRoleName());
+    for (TSentryGroup group : request.getGroups()) {
+      rUpdate.addToAddGroups(group.getGroupName());
+    }
+    permsUpdater.handleUpdateNotification(update);
+    LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + ", " + request.getRoleName() + "]..");
+  }
+
+  @Override
+  public void onAlterSentryRoleDeleteGroups(
+      TAlterSentryRoleDeleteGroupsRequest request)
+      throws SentryPluginException {
+    PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+    TRoleChanges rUpdate = update.addRoleUpdate(request.getRoleName());
+    for (TSentryGroup group : request.getGroups()) {
+      rUpdate.addToDelGroups(group.getGroupName());
+    }
+    permsUpdater.handleUpdateNotification(update);
+    LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + ", " + request.getRoleName() + "]..");
+  }
+
+  @Override
+  public void onAlterSentryRoleGrantPrivilege(
+      TAlterSentryRoleGrantPrivilegeRequest request)
+      throws SentryPluginException {
+    String authzObj = getAuthzObj(request.getPrivilege());
+    if (authzObj != null) {
+      PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+      update.addPrivilegeUpdate(authzObj).putToAddPrivileges(
+          request.getRoleName(), request.getPrivilege().getAction().toUpperCase());
+      permsUpdater.handleUpdateNotification(update);
+      LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + "]..");
+    }
+  }
+
+  @Override
+  public void onRenameSentryPrivilege(TRenamePrivilegesRequest request)
+      throws SentryPluginException {
+    String oldAuthz = getAuthzObj(request.getOldAuthorizable());
+    String newAuthz = getAuthzObj(request.getNewAuthorizable());
+    PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+    TPrivilegeChanges privUpdate = update.addPrivilegeUpdate(PermissionsUpdate.RENAME_PRIVS);
+    privUpdate.putToAddPrivileges(newAuthz, newAuthz);
+    privUpdate.putToDelPrivileges(oldAuthz, oldAuthz);
+    permsUpdater.handleUpdateNotification(update);
+    LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + ", " + newAuthz + ", " + oldAuthz + "]..");
+  }
+
+  @Override
+  public void onAlterSentryRoleRevokePrivilege(
+      TAlterSentryRoleRevokePrivilegeRequest request)
+      throws SentryPluginException {
+    String authzObj = getAuthzObj(request.getPrivilege());
+    if (authzObj != null) {
+      PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+      update.addPrivilegeUpdate(authzObj).putToDelPrivileges(
+          request.getRoleName(), request.getPrivilege().getAction().toUpperCase());
+      permsUpdater.handleUpdateNotification(update);
+      LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + ", " + authzObj + "]..");
+    }
+  }
+
+  @Override
+  public void onDropSentryRole(TDropSentryRoleRequest request)
+      throws SentryPluginException {
+    PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+    update.addPrivilegeUpdate(PermissionsUpdate.ALL_AUTHZ_OBJ).putToDelPrivileges(
+        request.getRoleName(), PermissionsUpdate.ALL_AUTHZ_OBJ);
+    update.addRoleUpdate(request.getRoleName()).addToDelGroups(PermissionsUpdate.ALL_GROUPS);
+    permsUpdater.handleUpdateNotification(update);
+    LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + ", " + request.getRoleName() + "]..");
+  }
+
+  @Override
+  public void onDropSentryPrivilege(TDropPrivilegesRequest request)
+      throws SentryPluginException {
+    PermissionsUpdate update = new PermissionsUpdate(permSeqNum.incrementAndGet(), false);
+    String authzObj = getAuthzObj(request.getAuthorizable());
+    update.addPrivilegeUpdate(authzObj).putToDelPrivileges(
+        PermissionsUpdate.ALL_ROLES, PermissionsUpdate.ALL_ROLES);
+    permsUpdater.handleUpdateNotification(update);
+    LOGGER.debug("Authz Perm preUpdate [" + update.getSeqNum() + ", " + authzObj + "]..");
+  }
+
+  private String getAuthzObj(TSentryPrivilege privilege) {
+    String authzObj = null;
+    if (!SentryStore.isNULL(privilege.getDbName())) {
+      String dbName = privilege.getDbName();
+      String tblName = privilege.getTableName();
+      if (SentryStore.isNULL(tblName)) {
+        authzObj = dbName;
+      } else {
+        authzObj = dbName + "." + tblName;
+      }
+    }
+    return authzObj;
+  }
+
+  private String getAuthzObj(TSentryAuthorizable authzble) {
+    String authzObj = null;
+    if (!SentryStore.isNULL(authzble.getDb())) {
+      String dbName = authzble.getDb();
+      String tblName = authzble.getTable();
+      if (SentryStore.isNULL(tblName)) {
+        authzObj = dbName;
+      } else {
+        authzObj = dbName + "." + tblName;
+      }
+    }
+    return authzObj;
+  }
+}
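
What the plugin emits per event, made concrete: a grant plus a role-to-group
assignment each become a small PermissionsUpdate keyed by the authz object
("db" or "db.table", per getAuthzObj above). Sketch only; the two changes are
combined here for brevity, while the plugin actually sends one update per
request (same package and illustrative names assumed):

public class PermUpdateSketch {
  public static void main(String[] args) {
    PermissionsUpdate update = new PermissionsUpdate(6L, false); // partial update, seq 6
    // GRANT SELECT ON db1.tbl1 TO ROLE analyst
    update.addPrivilegeUpdate("db1.tbl1").putToAddPrivileges("analyst", "SELECT");
    // GRANT ROLE analyst TO GROUP analysts
    update.addRoleUpdate("analyst").addToAddGroups("analysts");
    // permsUpdater.handleUpdateNotification(update) would queue it for HDFS
  }
}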

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java
new file mode 100644
index 0000000..f321d3d
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateForwarder.java
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+public class UpdateForwarder<K extends Updateable.Update> implements
+    Updateable<K> {
+
+  public static interface ExternalImageRetriever<K> {
+
+    public K retrieveFullImage(long currSeqNum);
+
+  }
+
+  private final AtomicLong lastSeenSeqNum = new AtomicLong(0);
+  private final AtomicLong lastCommittedSeqNum = new AtomicLong(0);
+  // Updates should be handled in order
+  private final Executor updateHandler = Executors.newSingleThreadExecutor();
+
+  // The update log is used when propagating updates to a downstream cache.
+  // It stores all commits that were applied to this cache.
+  // When the update log is filled to capacity (updateLogSize), all
+  // entries are cleared and a compact image of the state of the cache is
+  // appended to the log.
+  // The first entry in an update log (and consequently the first update a
+  // downstream cache sees) will be a full image. All subsequent entries are
+  // partial edits.
+  private final LinkedList<K> updateLog = new LinkedList<K>();
+  // UpdateLog is disabled when updateLogSize = 0;
+  private final int updateLogSize;
+
+  private final ExternalImageRetriever<K> imageRetriever;
+
+  private volatile Updateable<K> updateable;
+
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+  private static final long INIT_SEQ_NUM = -2;
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(UpdateForwarder.class);
+
+  public UpdateForwarder(Updateable<K> updateable,
+      ExternalImageRetriever<K> imageRetriever, int updateLogSize) {
+    this(updateable, imageRetriever, updateLogSize, 5000);
+  }
+  public UpdateForwarder(Updateable<K> updateable,
+      ExternalImageRetriever<K> imageRetriever, int updateLogSize,
+      int initUpdateRetryDelay) {
+    this.updateLogSize = updateLogSize;
+    this.imageRetriever = imageRetriever;
+    if (imageRetriever != null) {
+      spawnInitialUpdater(updateable, initUpdateRetryDelay);
+    } else {
+      this.updateable = updateable;
+    }
+  }
+
+  private void spawnInitialUpdater(final Updateable<K> updateable,
+      final int initUpdateRetryDelay) {
+    K firstFullImage = null;
+    try {
+      firstFullImage = imageRetriever.retrieveFullImage(INIT_SEQ_NUM);
+    } catch (Exception e) {
+      LOGGER.warn("InitialUpdater encountered exception !! ", e);
+      firstFullImage = null;
+      Thread initUpdater = new Thread() {
+        @Override
+        public void run() {
+          while (UpdateForwarder.this.updateable == null) {
+            try {
+              Thread.sleep(initUpdateRetryDelay);
+            } catch (InterruptedException e) {
+              LOGGER.warn("Thread interrupted !! ", e);
+              break;
+            }
+            K fullImage = null;
+            try {
+              fullImage =
+                  UpdateForwarder.this.imageRetriever
+                  .retrieveFullImage(INIT_SEQ_NUM);
+              appendToUpdateLog(fullImage);
+            } catch (Exception e) {
+              LOGGER.warn("InitialUpdater encountered exception !! ", e);
+            }
+            if (fullImage != null) {
+              UpdateForwarder.this.updateable = updateable.updateFull(fullImage);
+            }
+          }
+        }
+      };
+      initUpdater.start();
+    }
+    if (firstFullImage != null) {
+      appendToUpdateLog(firstFullImage);
+      this.updateable = updateable.updateFull(firstFullImage);
+    }
+  }
+  /**
+   * Handle notifications from HMS plug-in or upstream Cache
+   * @param update
+   */
+  public void handleUpdateNotification(final K update) {
+    // Correct the seqNums on the first update
+    if (lastCommittedSeqNum.get() == INIT_SEQ_NUM) {
+      K firstUpdate = updateLog.peek();
+      long firstSeqNum = update.getSeqNum() - 1; 
+      if (firstUpdate != null) {
+        firstUpdate.setSeqNum(firstSeqNum);
+      }
+      lastCommittedSeqNum.set(firstSeqNum);
+      lastSeenSeqNum.set(firstSeqNum);
+    }
+    final boolean editNotMissed = 
+        lastSeenSeqNum.incrementAndGet() == update.getSeqNum();
+    if (!editNotMissed) {
+      lastSeenSeqNum.set(update.getSeqNum());
+    }
+    Runnable task = new Runnable() {
+      @Override
+      public void run() {
+        K toUpdate = update;
+        if (update.hasFullImage()) {
+          updateable = updateable.updateFull(update);
+        } else {
+          if (editNotMissed) {
+            // apply partial update
+            updateable.updatePartial(Lists.newArrayList(update), lock);
+          } else {
+            // Retrieve full update from External Source and
+            if (imageRetriever != null) {
+              toUpdate = imageRetriever
+                  .retrieveFullImage(update.getSeqNum());
+              updateable = updateable.updateFull(toUpdate);
+            }
+          }
+        }
+        appendToUpdateLog(toUpdate);
+      }
+    };
+    updateHandler.execute(task);
+  }
+
+  private void appendToUpdateLog(K update) {
+    synchronized (updateLog) {
+      boolean logCompacted = false;
+      if (updateLogSize > 0) {
+        if (update.hasFullImage() || (updateLog.size() == updateLogSize)) {
+          // Essentially a log compaction
+          updateLog.clear();
+          updateLog.add(update.hasFullImage() ? update
+              : createFullImageUpdate(update.getSeqNum()));
+          logCompacted = true;
+        } else {
+          updateLog.add(update);
+        }
+      }
+      lastCommittedSeqNum.set(update.getSeqNum());
+      if (LOGGER.isDebugEnabled()) {
+        LOGGER.debug("#### Appending to Update Log ["
+            + "type=" + update.getClass() + ", "
+            + "lastCommit=" + lastCommittedSeqNum.get() + ", "
+            + "lastSeen=" + lastSeenSeqNum.get() + ", "
+            + "logCompacted=" + logCompacted + "]");
+      }
+    }
+  }
+
+  /**
+   * Return all updates from requested seqNum (inclusive)
+   * @param seqNum
+   * @return
+   */
+  public List<K> getAllUpdatesFrom(long seqNum) {
+    List<K> retVal = new LinkedList<K>();
+    synchronized (updateLog) {
+      long currSeqNum = lastCommittedSeqNum.get();
+      if (LOGGER.isDebugEnabled() && (updateable != null)) {
+        LOGGER.debug("#### GetAllUpdatesFrom ["
+            + "type=" + updateable.getClass() + ", "
+            + "reqSeqNum=" + seqNum + ", "
+            + "lastCommit=" + lastCommittedSeqNum.get() + ", "
+            + "lastSeen=" + lastSeenSeqNum.get() + ", "
+            + "updateLogSize=" + updateLog.size() + "]");
+      }
+      if (updateLogSize == 0) {
+        // no update log configured
+        return retVal;
+      }
+      K head = updateLog.peek();
+      if (head == null) {
+        return retVal;
+      }
+      if (seqNum > currSeqNum + 1) {
+        // This process has probably restarted since downstream
+        // received the last update
+        retVal.addAll(updateLog);
+        return retVal;
+      }
+      if (head.getSeqNum() > seqNum) {
+        // Caller has diverged greatly..
+        if (head.hasFullImage()) {
+          // head is a refresh(full) image
+          // Send full image along with partial updates
+          for (K u : updateLog) {
+            retVal.add(u);
+          }
+        } else {
+          // Create a full image
+          // clear updateLog
+          // add fullImage to head of Log
+          // NOTE : This should ideally never happen
+          K fullImage = createFullImageUpdate(currSeqNum);
+          updateLog.clear();
+          updateLog.add(fullImage);
+          retVal.add(fullImage);
+        }
+      } else {
+        // increment iterator to requested seqNum
+        Iterator<K> iter = updateLog.iterator();
+        while (iter.hasNext()) {
+          K elem = iter.next();
+          if (elem.getSeqNum() >= seqNum) {
+            retVal.add(elem);
+          }
+        }
+      }
+    }
+    return retVal;
+  }
+ 
+  public boolean areAllUpdatesCommited() {
+    return lastCommittedSeqNum.get() == lastSeenSeqNum.get();
+  }
+
+  public long getLastCommitted() {
+    return lastCommittedSeqNum.get();
+  }
+
+  public long getLastSeen() {
+    return lastSeenSeqNum.get();
+  }
+
+  @Override
+  public Updateable<K> updateFull(K update) {
+    return (updateable != null) ? updateable.updateFull(update) : null;
+  }
+
+  @Override
+  public void updatePartial(Iterable<K> updates, ReadWriteLock lock) {
+    if (updateable != null) {
+      updateable.updatePartial(updates, lock);
+    }
+  }
+  
+  @Override
+  public long getLastUpdatedSeqNum() {
+    return (updateable != null) ? updateable.getLastUpdatedSeqNum() : INIT_SEQ_NUM;
+  }
+
+  @Override
+  public K createFullImageUpdate(long currSeqNum) {
+    return (updateable != null) ? updateable.createFullImageUpdate(currSeqNum) : null;
+  }
+
+}
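
Finally, the forwarder's contract in one runnable sketch: contiguous sequence
numbers are applied as partial edits, a gap forces a full-image refresh that
also compacts the update log, and getAllUpdatesFrom then serves the compacted
image. Everything here is a toy stand-in (same package assumed), and it
presumes Updateable and Updateable.Update expose only the methods the
forwarder itself calls:

import java.util.concurrent.locks.ReadWriteLock;

public class UpdateForwarderSketch {
  // Toy update: a sequence number plus a full-image flag.
  static class ToyUpdate implements Updateable.Update {
    private long seq;
    private final boolean full;
    ToyUpdate(long seq, boolean full) { this.seq = seq; this.full = full; }
    public long getSeqNum() { return seq; }
    public void setSeqNum(long seq) { this.seq = seq; }
    public boolean hasFullImage() { return full; }
  }

  // Toy state: remembers only the last applied sequence number.
  static class ToyState implements Updateable<ToyUpdate> {
    private volatile long lastSeq = 0;
    public Updateable<ToyUpdate> updateFull(ToyUpdate u) {
      ToyState fresh = new ToyState();
      fresh.lastSeq = u.getSeqNum();
      return fresh;
    }
    public void updatePartial(Iterable<ToyUpdate> us, ReadWriteLock lock) {
      for (ToyUpdate u : us) { lastSeq = u.getSeqNum(); }
    }
    public long getLastUpdatedSeqNum() { return lastSeq; }
    public ToyUpdate createFullImageUpdate(long seq) { return new ToyUpdate(seq, true); }
  }

  public static void main(String[] args) throws Exception {
    UpdateForwarder.ExternalImageRetriever<ToyUpdate> retriever =
        new UpdateForwarder.ExternalImageRetriever<ToyUpdate>() {
          public ToyUpdate retrieveFullImage(long seq) { return new ToyUpdate(seq, true); }
        };
    UpdateForwarder<ToyUpdate> fwd =
        new UpdateForwarder<ToyUpdate>(new ToyState(), retriever, 100);
    fwd.handleUpdateNotification(new ToyUpdate(6, false)); // first edit: partial
    fwd.handleUpdateNotification(new ToyUpdate(7, false)); // contiguous: partial
    fwd.handleUpdateNotification(new ToyUpdate(9, false)); // 8 missed: full refresh + compaction
    Thread.sleep(500); // updates apply asynchronously on a single-thread executor
    System.out.println(fwd.getAllUpdatesFrom(7).size()); // 1, just the compacted full image
  }
}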


[8/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java
new file mode 100644
index 0000000..663fe4e
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/SentryHDFSService.java
@@ -0,0 +1,3483 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SentryHDFSService {
+
+  public interface Iface {
+
+    public void handle_hms_notification(TPathsUpdate pathsUpdate) throws org.apache.thrift.TException;
+
+    public long check_hms_seq_num(long pathSeqNum) throws org.apache.thrift.TException;
+
+    public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pathSeqNum) throws org.apache.thrift.TException;
+
+    public Map<String,List<String>> get_all_related_paths(String path, boolean exactMatch) throws org.apache.thrift.TException;
+
+  }
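+  // Editor's note (not part of the generated output): the four RPCs appear to
+  // divide as follows -- handle_hms_notification pushes a Hive Metastore path
+  // change to the Sentry service, check_hms_seq_num returns the path sequence
+  // number the server last processed (so a caller can detect missed updates),
+  // get_all_authz_updates_from fetches all permission and path deltas newer
+  // than the given sequence numbers, and get_all_related_paths maps a path to
+  // related paths, mainly as a debugging aid.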
+
+  public interface AsyncIface {
+
+    public void handle_hms_notification(TPathsUpdate pathsUpdate, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.handle_hms_notification_call> resultHandler) throws org.apache.thrift.TException;
+
+    public void check_hms_seq_num(long pathSeqNum, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.check_hms_seq_num_call> resultHandler) throws org.apache.thrift.TException;
+
+    public void get_all_authz_updates_from(long permSeqNum, long pathSeqNum, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_all_authz_updates_from_call> resultHandler) throws org.apache.thrift.TException;
+
+    public void get_all_related_paths(String path, boolean exactMatch, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_all_related_paths_call> resultHandler) throws org.apache.thrift.TException;
+
+  }
+
+  public static class Client extends org.apache.thrift.TServiceClient implements Iface {
+    public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
+      public Factory() {}
+      public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
+        return new Client(prot);
+      }
+      public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
+        return new Client(iprot, oprot);
+      }
+    }
+
+    public Client(org.apache.thrift.protocol.TProtocol prot)
+    {
+      super(prot, prot);
+    }
+
+    public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
+      super(iprot, oprot);
+    }
+
+    public void handle_hms_notification(TPathsUpdate pathsUpdate) throws org.apache.thrift.TException
+    {
+      send_handle_hms_notification(pathsUpdate);
+      recv_handle_hms_notification();
+    }
+
+    public void send_handle_hms_notification(TPathsUpdate pathsUpdate) throws org.apache.thrift.TException
+    {
+      handle_hms_notification_args args = new handle_hms_notification_args();
+      args.setPathsUpdate(pathsUpdate);
+      sendBase("handle_hms_notification", args);
+    }
+
+    public void recv_handle_hms_notification() throws org.apache.thrift.TException
+    {
+      handle_hms_notification_result result = new handle_hms_notification_result();
+      receiveBase(result, "handle_hms_notification");
+      return;
+    }
+
+    public long check_hms_seq_num(long pathSeqNum) throws org.apache.thrift.TException
+    {
+      send_check_hms_seq_num(pathSeqNum);
+      return recv_check_hms_seq_num();
+    }
+
+    public void send_check_hms_seq_num(long pathSeqNum) throws org.apache.thrift.TException
+    {
+      check_hms_seq_num_args args = new check_hms_seq_num_args();
+      args.setPathSeqNum(pathSeqNum);
+      sendBase("check_hms_seq_num", args);
+    }
+
+    public long recv_check_hms_seq_num() throws org.apache.thrift.TException
+    {
+      check_hms_seq_num_result result = new check_hms_seq_num_result();
+      receiveBase(result, "check_hms_seq_num");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "check_hms_seq_num failed: unknown result");
+    }
+
+    public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pathSeqNum) throws org.apache.thrift.TException
+    {
+      send_get_all_authz_updates_from(permSeqNum, pathSeqNum);
+      return recv_get_all_authz_updates_from();
+    }
+
+    public void send_get_all_authz_updates_from(long permSeqNum, long pathSeqNum) throws org.apache.thrift.TException
+    {
+      get_all_authz_updates_from_args args = new get_all_authz_updates_from_args();
+      args.setPermSeqNum(permSeqNum);
+      args.setPathSeqNum(pathSeqNum);
+      sendBase("get_all_authz_updates_from", args);
+    }
+
+    public TAuthzUpdateResponse recv_get_all_authz_updates_from() throws org.apache.thrift.TException
+    {
+      get_all_authz_updates_from_result result = new get_all_authz_updates_from_result();
+      receiveBase(result, "get_all_authz_updates_from");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_authz_updates_from failed: unknown result");
+    }
+
+    public Map<String,List<String>> get_all_related_paths(String path, boolean exactMatch) throws org.apache.thrift.TException
+    {
+      send_get_all_related_paths(path, exactMatch);
+      return recv_get_all_related_paths();
+    }
+
+    public void send_get_all_related_paths(String path, boolean exactMatch) throws org.apache.thrift.TException
+    {
+      get_all_related_paths_args args = new get_all_related_paths_args();
+      args.setPath(path);
+      args.setExactMatch(exactMatch);
+      sendBase("get_all_related_paths", args);
+    }
+
+    public Map<String,List<String>> recv_get_all_related_paths() throws org.apache.thrift.TException
+    {
+      get_all_related_paths_result result = new get_all_related_paths_result();
+      receiveBase(result, "get_all_related_paths");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_related_paths failed: unknown result");
+    }
+
+  }
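+
+  // Editor's note (illustrative sketch, not generated output): constructing
+  // the synchronous client uses only stock Thrift classes; host and port
+  // below are assumed:
+  //
+  //   TTransport transport = new TSocket(host, port);
+  //   transport.open();
+  //   SentryHDFSService.Client client =
+  //       new SentryHDFSService.Client(new TBinaryProtocol(transport));
+  //   long serverSeqNum = client.check_hms_seq_num(localPathSeqNum);
+  //   transport.close();
+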
+  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
+    public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
+      private org.apache.thrift.async.TAsyncClientManager clientManager;
+      private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
+      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
+        this.clientManager = clientManager;
+        this.protocolFactory = protocolFactory;
+      }
+      public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
+        return new AsyncClient(protocolFactory, clientManager, transport);
+      }
+    }
+
+    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
+      super(protocolFactory, clientManager, transport);
+    }
+
+    public void handle_hms_notification(TPathsUpdate pathsUpdate, org.apache.thrift.async.AsyncMethodCallback<handle_hms_notification_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      handle_hms_notification_call method_call = new handle_hms_notification_call(pathsUpdate, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class handle_hms_notification_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private TPathsUpdate pathsUpdate;
+      public handle_hms_notification_call(TPathsUpdate pathsUpdate, org.apache.thrift.async.AsyncMethodCallback<handle_hms_notification_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.pathsUpdate = pathsUpdate;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("handle_hms_notification", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        handle_hms_notification_args args = new handle_hms_notification_args();
+        args.setPathsUpdate(pathsUpdate);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_handle_hms_notification();
+      }
+    }
+
+    public void check_hms_seq_num(long pathSeqNum, org.apache.thrift.async.AsyncMethodCallback<check_hms_seq_num_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      check_hms_seq_num_call method_call = new check_hms_seq_num_call(pathSeqNum, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class check_hms_seq_num_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private long pathSeqNum;
+      public check_hms_seq_num_call(long pathSeqNum, org.apache.thrift.async.AsyncMethodCallback<check_hms_seq_num_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.pathSeqNum = pathSeqNum;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("check_hms_seq_num", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        check_hms_seq_num_args args = new check_hms_seq_num_args();
+        args.setPathSeqNum(pathSeqNum);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public long getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_check_hms_seq_num();
+      }
+    }
+
+    public void get_all_authz_updates_from(long permSeqNum, long pathSeqNum, org.apache.thrift.async.AsyncMethodCallback<get_all_authz_updates_from_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_all_authz_updates_from_call method_call = new get_all_authz_updates_from_call(permSeqNum, pathSeqNum, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_all_authz_updates_from_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private long permSeqNum;
+      private long pathSeqNum;
+      public get_all_authz_updates_from_call(long permSeqNum, long pathSeqNum, org.apache.thrift.async.AsyncMethodCallback<get_all_authz_updates_from_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.permSeqNum = permSeqNum;
+        this.pathSeqNum = pathSeqNum;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_all_authz_updates_from", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_all_authz_updates_from_args args = new get_all_authz_updates_from_args();
+        args.setPermSeqNum(permSeqNum);
+        args.setPathSeqNum(pathSeqNum);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public TAuthzUpdateResponse getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_all_authz_updates_from();
+      }
+    }
+
+    public void get_all_related_paths(String path, boolean exactMatch, org.apache.thrift.async.AsyncMethodCallback<get_all_related_paths_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_all_related_paths_call method_call = new get_all_related_paths_call(path, exactMatch, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_all_related_paths_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String path;
+      private boolean exactMatch;
+      public get_all_related_paths_call(String path, boolean exactMatch, org.apache.thrift.async.AsyncMethodCallback<get_all_related_paths_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.path = path;
+        this.exactMatch = exactMatch;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_all_related_paths", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_all_related_paths_args args = new get_all_related_paths_args();
+        args.setPath(path);
+        args.setExactMatch(exactMatch);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public Map<String,List<String>> getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_all_related_paths();
+      }
+    }
+
+  }
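+
+  // Editor's note: each AsyncClient method above queues a TAsyncMethodCall
+  // whose write_args() serializes the request; once the response frame has
+  // been read, getResult() decodes it by replaying the buffered bytes through
+  // a throwaway synchronous Client.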
+
+  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
+    private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
+    public Processor(I iface) {
+      super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
+    }
+
+    protected Processor(I iface, Map<String,  org.apache.thrift.ProcessFunction<I, ? extends  org.apache.thrift.TBase>> processMap) {
+      super(iface, getProcessMap(processMap));
+    }
+
+    private static <I extends Iface> Map<String,  org.apache.thrift.ProcessFunction<I, ? extends  org.apache.thrift.TBase>> getProcessMap(Map<String,  org.apache.thrift.ProcessFunction<I, ? extends  org.apache.thrift.TBase>> processMap) {
+      processMap.put("handle_hms_notification", new handle_hms_notification());
+      processMap.put("check_hms_seq_num", new check_hms_seq_num());
+      processMap.put("get_all_authz_updates_from", new get_all_authz_updates_from());
+      processMap.put("get_all_related_paths", new get_all_related_paths());
+      return processMap;
+    }
+
+    public static class handle_hms_notification<I extends Iface> extends org.apache.thrift.ProcessFunction<I, handle_hms_notification_args> {
+      public handle_hms_notification() {
+        super("handle_hms_notification");
+      }
+
+      public handle_hms_notification_args getEmptyArgsInstance() {
+        return new handle_hms_notification_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public handle_hms_notification_result getResult(I iface, handle_hms_notification_args args) throws org.apache.thrift.TException {
+        handle_hms_notification_result result = new handle_hms_notification_result();
+        iface.handle_hms_notification(args.pathsUpdate);
+        return result;
+      }
+    }
+
+    public static class check_hms_seq_num<I extends Iface> extends org.apache.thrift.ProcessFunction<I, check_hms_seq_num_args> {
+      public check_hms_seq_num() {
+        super("check_hms_seq_num");
+      }
+
+      public check_hms_seq_num_args getEmptyArgsInstance() {
+        return new check_hms_seq_num_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public check_hms_seq_num_result getResult(I iface, check_hms_seq_num_args args) throws org.apache.thrift.TException {
+        check_hms_seq_num_result result = new check_hms_seq_num_result();
+        result.success = iface.check_hms_seq_num(args.pathSeqNum);
+        result.setSuccessIsSet(true);
+        return result;
+      }
+    }
+
+    public static class get_all_authz_updates_from<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_all_authz_updates_from_args> {
+      public get_all_authz_updates_from() {
+        super("get_all_authz_updates_from");
+      }
+
+      public get_all_authz_updates_from_args getEmptyArgsInstance() {
+        return new get_all_authz_updates_from_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_all_authz_updates_from_result getResult(I iface, get_all_authz_updates_from_args args) throws org.apache.thrift.TException {
+        get_all_authz_updates_from_result result = new get_all_authz_updates_from_result();
+        result.success = iface.get_all_authz_updates_from(args.permSeqNum, args.pathSeqNum);
+        return result;
+      }
+    }
+
+    public static class get_all_related_paths<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_all_related_paths_args> {
+      public get_all_related_paths() {
+        super("get_all_related_paths");
+      }
+
+      public get_all_related_paths_args getEmptyArgsInstance() {
+        return new get_all_related_paths_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_all_related_paths_result getResult(I iface, get_all_related_paths_args args) throws org.apache.thrift.TException {
+        get_all_related_paths_result result = new get_all_related_paths_result();
+        result.success = iface.get_all_related_paths(args.path, args.exactMatch);
+        return result;
+      }
+    }
+
+  }
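+
+  // Editor's note: Processor dispatches on the RPC name via the process map
+  // built above; a server binds it to an Iface implementation using stock
+  // Thrift classes, e.g. (sketch; handler and port are assumed):
+  //
+  //   TServerTransport serverTransport = new TServerSocket(port);
+  //   TServer server = new TSimpleServer(new TServer.Args(serverTransport)
+  //       .processor(new SentryHDFSService.Processor<Iface>(handler)));
+  //   server.serve();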
+
+  public static class handle_hms_notification_args implements org.apache.thrift.TBase<handle_hms_notification_args, handle_hms_notification_args._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("handle_hms_notification_args");
+
+    private static final org.apache.thrift.protocol.TField PATHS_UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("pathsUpdate", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new handle_hms_notification_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new handle_hms_notification_argsTupleSchemeFactory());
+    }
+
+    private TPathsUpdate pathsUpdate; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      PATHS_UPDATE((short)1, "pathsUpdate");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // PATHS_UPDATE
+            return PATHS_UPDATE;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.PATHS_UPDATE, new org.apache.thrift.meta_data.FieldMetaData("pathsUpdate", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPathsUpdate.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(handle_hms_notification_args.class, metaDataMap);
+    }
+
+    public handle_hms_notification_args() {
+    }
+
+    public handle_hms_notification_args(
+      TPathsUpdate pathsUpdate)
+    {
+      this();
+      this.pathsUpdate = pathsUpdate;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public handle_hms_notification_args(handle_hms_notification_args other) {
+      if (other.isSetPathsUpdate()) {
+        this.pathsUpdate = new TPathsUpdate(other.pathsUpdate);
+      }
+    }
+
+    public handle_hms_notification_args deepCopy() {
+      return new handle_hms_notification_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.pathsUpdate = null;
+    }
+
+    public TPathsUpdate getPathsUpdate() {
+      return this.pathsUpdate;
+    }
+
+    public void setPathsUpdate(TPathsUpdate pathsUpdate) {
+      this.pathsUpdate = pathsUpdate;
+    }
+
+    public void unsetPathsUpdate() {
+      this.pathsUpdate = null;
+    }
+
+    /** Returns true if field pathsUpdate is set (has been assigned a value) and false otherwise */
+    public boolean isSetPathsUpdate() {
+      return this.pathsUpdate != null;
+    }
+
+    public void setPathsUpdateIsSet(boolean value) {
+      if (!value) {
+        this.pathsUpdate = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case PATHS_UPDATE:
+        if (value == null) {
+          unsetPathsUpdate();
+        } else {
+          setPathsUpdate((TPathsUpdate)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case PATHS_UPDATE:
+        return getPathsUpdate();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case PATHS_UPDATE:
+        return isSetPathsUpdate();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof handle_hms_notification_args)
+        return this.equals((handle_hms_notification_args)that);
+      return false;
+    }
+
+    public boolean equals(handle_hms_notification_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_pathsUpdate = true && this.isSetPathsUpdate();
+      boolean that_present_pathsUpdate = true && that.isSetPathsUpdate();
+      if (this_present_pathsUpdate || that_present_pathsUpdate) {
+        if (!(this_present_pathsUpdate && that_present_pathsUpdate))
+          return false;
+        if (!this.pathsUpdate.equals(that.pathsUpdate))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      boolean present_pathsUpdate = true && (isSetPathsUpdate());
+      builder.append(present_pathsUpdate);
+      if (present_pathsUpdate)
+        builder.append(pathsUpdate);
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(handle_hms_notification_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      handle_hms_notification_args typedOther = (handle_hms_notification_args)other;
+
+      lastComparison = Boolean.valueOf(isSetPathsUpdate()).compareTo(typedOther.isSetPathsUpdate());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetPathsUpdate()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pathsUpdate, typedOther.pathsUpdate);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("handle_hms_notification_args(");
+      boolean first = true;
+
+      sb.append("pathsUpdate:");
+      if (this.pathsUpdate == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.pathsUpdate);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+      if (pathsUpdate != null) {
+        pathsUpdate.validate();
+      }
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class handle_hms_notification_argsStandardSchemeFactory implements SchemeFactory {
+      public handle_hms_notification_argsStandardScheme getScheme() {
+        return new handle_hms_notification_argsStandardScheme();
+      }
+    }
+
+    private static class handle_hms_notification_argsStandardScheme extends StandardScheme<handle_hms_notification_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, handle_hms_notification_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // PATHS_UPDATE
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.pathsUpdate = new TPathsUpdate();
+                struct.pathsUpdate.read(iprot);
+                struct.setPathsUpdateIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, handle_hms_notification_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.pathsUpdate != null) {
+          oprot.writeFieldBegin(PATHS_UPDATE_FIELD_DESC);
+          struct.pathsUpdate.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class handle_hms_notification_argsTupleSchemeFactory implements SchemeFactory {
+      public handle_hms_notification_argsTupleScheme getScheme() {
+        return new handle_hms_notification_argsTupleScheme();
+      }
+    }
+
+    private static class handle_hms_notification_argsTupleScheme extends TupleScheme<handle_hms_notification_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, handle_hms_notification_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetPathsUpdate()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetPathsUpdate()) {
+          struct.pathsUpdate.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, handle_hms_notification_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.pathsUpdate = new TPathsUpdate();
+          struct.pathsUpdate.read(iprot);
+          struct.setPathsUpdateIsSet(true);
+        }
+      }
+    }
+
+  }
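+
+  // Editor's note: every generated args/result struct in this file repeats
+  // the shape above -- a _Fields enum naming the Thrift field ids, a
+  // metaDataMap registered for runtime reflection, a StandardScheme that
+  // writes full field headers, and a more compact TupleScheme that writes a
+  // BitSet of the fields present followed by only their values.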
+
+  public static class handle_hms_notification_result implements org.apache.thrift.TBase<handle_hms_notification_result, handle_hms_notification_result._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("handle_hms_notification_result");
+
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new handle_hms_notification_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new handle_hms_notification_resultTupleSchemeFactory());
+    }
+
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(handle_hms_notification_result.class, metaDataMap);
+    }
+
+    public handle_hms_notification_result() {
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public handle_hms_notification_result(handle_hms_notification_result other) {
+    }
+
+    public handle_hms_notification_result deepCopy() {
+      return new handle_hms_notification_result(this);
+    }
+
+    @Override
+    public void clear() {
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof handle_hms_notification_result)
+        return this.equals((handle_hms_notification_result)that);
+      return false;
+    }
+
+    public boolean equals(handle_hms_notification_result that) {
+      if (that == null)
+        return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(handle_hms_notification_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      handle_hms_notification_result typedOther = (handle_hms_notification_result)other;
+
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("handle_hms_notification_result(");
+      boolean first = true;
+
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class handle_hms_notification_resultStandardSchemeFactory implements SchemeFactory {
+      public handle_hms_notification_resultStandardScheme getScheme() {
+        return new handle_hms_notification_resultStandardScheme();
+      }
+    }
+
+    private static class handle_hms_notification_resultStandardScheme extends StandardScheme<handle_hms_notification_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, handle_hms_notification_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, handle_hms_notification_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class handle_hms_notification_resultTupleSchemeFactory implements SchemeFactory {
+      public handle_hms_notification_resultTupleScheme getScheme() {
+        return new handle_hms_notification_resultTupleScheme();
+      }
+    }
+
+    private static class handle_hms_notification_resultTupleScheme extends TupleScheme<handle_hms_notification_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, handle_hms_notification_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, handle_hms_notification_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+      }
+    }
+
+  }
+
+  public static class check_hms_seq_num_args implements org.apache.thrift.TBase<check_hms_seq_num_args, check_hms_seq_num_args._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("check_hms_seq_num_args");
+
+    private static final org.apache.thrift.protocol.TField PATH_SEQ_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("pathSeqNum", org.apache.thrift.protocol.TType.I64, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new check_hms_seq_num_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new check_hms_seq_num_argsTupleSchemeFactory());
+    }
+
+    private long pathSeqNum; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      PATH_SEQ_NUM((short)1, "pathSeqNum");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // PATH_SEQ_NUM
+            return PATH_SEQ_NUM;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    private static final int __PATHSEQNUM_ISSET_ID = 0;
+    private byte __isset_bitfield = 0;
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.PATH_SEQ_NUM, new org.apache.thrift.meta_data.FieldMetaData("pathSeqNum", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(check_hms_seq_num_args.class, metaDataMap);
+    }
+
+    public check_hms_seq_num_args() {
+    }
+
+    public check_hms_seq_num_args(
+      long pathSeqNum)
+    {
+      this();
+      this.pathSeqNum = pathSeqNum;
+      setPathSeqNumIsSet(true);
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public check_hms_seq_num_args(check_hms_seq_num_args other) {
+      __isset_bitfield = other.__isset_bitfield;
+      this.pathSeqNum = other.pathSeqNum;
+    }
+
+    public check_hms_seq_num_args deepCopy() {
+      return new check_hms_seq_num_args(this);
+    }
+
+    @Override
+    public void clear() {
+      setPathSeqNumIsSet(false);
+      this.pathSeqNum = 0;
+    }
+
+    public long getPathSeqNum() {
+      return this.pathSeqNum;
+    }
+
+    public void setPathSeqNum(long pathSeqNum) {
+      this.pathSeqNum = pathSeqNum;
+      setPathSeqNumIsSet(true);
+    }
+
+    public void unsetPathSeqNum() {
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PATHSEQNUM_ISSET_ID);
+    }
+
+    /** Returns true if field pathSeqNum is set (has been assigned a value) and false otherwise */
+    public boolean isSetPathSeqNum() {
+      return EncodingUtils.testBit(__isset_bitfield, __PATHSEQNUM_ISSET_ID);
+    }
+
+    public void setPathSeqNumIsSet(boolean value) {
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PATHSEQNUM_ISSET_ID, value);
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case PATH_SEQ_NUM:
+        if (value == null) {
+          unsetPathSeqNum();
+        } else {
+          setPathSeqNum((Long)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case PATH_SEQ_NUM:
+        return Long.valueOf(getPathSeqNum());
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case PATH_SEQ_NUM:
+        return isSetPathSeqNum();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof check_hms_seq_num_args)
+        return this.equals((check_hms_seq_num_args)that);
+      return false;
+    }
+
+    public boolean equals(check_hms_seq_num_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_pathSeqNum = true;
+      boolean that_present_pathSeqNum = true;
+      if (this_present_pathSeqNum || that_present_pathSeqNum) {
+        if (!(this_present_pathSeqNum && that_present_pathSeqNum))
+          return false;
+        if (this.pathSeqNum != that.pathSeqNum)
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      boolean present_pathSeqNum = true;
+      builder.append(present_pathSeqNum);
+      if (present_pathSeqNum)
+        builder.append(pathSeqNum);
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(check_hms_seq_num_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      check_hms_seq_num_args typedOther = (check_hms_seq_num_args)other;
+
+      lastComparison = Boolean.valueOf(isSetPathSeqNum()).compareTo(typedOther.isSetPathSeqNum());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetPathSeqNum()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pathSeqNum, typedOther.pathSeqNum);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("check_hms_seq_num_args(");
+      boolean first = true;
+
+      sb.append("pathSeqNum:");
+      sb.append(this.pathSeqNum);
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+        __isset_bitfield = 0;
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class check_hms_seq_num_argsStandardSchemeFactory implements SchemeFactory {
+      public check_hms_seq_num_argsStandardScheme getScheme() {
+        return new check_hms_seq_num_argsStandardScheme();
+      }
+    }
+
+    private static class check_hms_seq_num_argsStandardScheme extends StandardScheme<check_hms_seq_num_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, check_hms_seq_num_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // PATH_SEQ_NUM
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.pathSeqNum = iprot.readI64();
+                struct.setPathSeqNumIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, check_hms_seq_num_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldBegin(PATH_SEQ_NUM_FIELD_DESC);
+        oprot.writeI64(struct.pathSeqNum);
+        oprot.writeFieldEnd();
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class check_hms_seq_num_argsTupleSchemeFactory implements SchemeFactory {
+      public check_hms_seq_num_argsTupleScheme getScheme() {
+        return new check_hms_seq_num_argsTupleScheme();
+      }
+    }
+
+    private static class check_hms_seq_num_argsTupleScheme extends TupleScheme<check_hms_seq_num_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, check_hms_seq_num_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetPathSeqNum()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetPathSeqNum()) {
+          oprot.writeI64(struct.pathSeqNum);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, check_hms_seq_num_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.pathSeqNum = iprot.readI64();
+          struct.setPathSeqNumIsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class check_hms_seq_num_result implements org.apache.thrift.TBase<check_hms_seq_num_result, check_hms_seq_num_result._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("check_hms_seq_num_result");
+
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new check_hms_seq_num_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new check_hms_seq_num_resultTupleSchemeFactory());
+    }
+
+    private long success; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      SUCCESS((short)0, "success");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    private static final int __SUCCESS_ISSET_ID = 0;
+    private byte __isset_bitfield = 0;
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(check_hms_seq_num_result.class, metaDataMap);
+    }
+
+    public check_hms_seq_num_result() {
+    }
+
+    public check_hms_seq_num_result(
+      long success)
+    {
+      this();
+      this.success = success;
+      setSuccessIsSet(true);
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public check_hms_seq_num_result(check_hms_seq_num_result other) {
+      __isset_bitfield = other.__isset_bitfield;
+      this.success = other.success;
+    }
+
+    public check_hms_seq_num_result deepCopy() {
+      return new check_hms_seq_num_result(this);
+    }
+
+    @Override
+    public void clear() {
+      setSuccessIsSet(false);
+      this.success = 0;
+    }
+
+    public long getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(long success) {
+      this.success = success;
+      setSuccessIsSet(true);
+    }
+
+    public void unsetSuccess() {
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((Long)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return Long.valueOf(getSuccess());
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case SUCCESS:
+        return isSetSuccess();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof check_hms_seq_num_result)
+        return this.equals((check_hms_seq_num_result)that);
+      return false;
+    }
+
+    public boolean equals(check_hms_seq_num_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_success = true;
+      boolean that_present_success = true;
+      if (this_present_success || that_present_success) {
+        if (!(this_present_success && that_present_success))
+          return false;
+        if (this.success != that.success)
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      boolean present_success = true;
+      builder.append(present_success);
+      if (present_success)
+        builder.append(success);
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(check_hms_seq_num_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      check_hms_seq_num_result typedOther = (check_hms_seq_num_result)other;
+
+      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSuccess()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("check_hms_seq_num_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      sb.append(this.success);
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+        __isset_bitfield = 0;
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class check_hms_seq_num_resultStandardSchemeFactory implements SchemeFactory {
+      public check_hms_seq_num_resultStandardScheme getScheme() {
+        return new check_hms_seq_num_resultStandardScheme();
+      }
+    }
+
+    private static class check_hms_seq_num_resultStandardScheme extends StandardScheme<check_hms_seq_num_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, check_hms_seq_num_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.success = iprot.readI64();
+                struct.setSuccessIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, check_hms_seq_num_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.isSetSuccess()) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          oprot.writeI64(struct.success);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class check_hms_seq_num_resultTupleSchemeFactory implements SchemeFactory {
+      public check_hms_seq_num_resultTupleScheme getScheme() {
+        return new check_hms_seq_num_resultTupleScheme();
+      }
+    }
+
+    private static class check_hms_seq_num_resultTupleScheme extends TupleScheme<check_hms_seq_num_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, check_hms_seq_num_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetSuccess()) {
+          oprot.writeI64(struct.success);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, check_hms_seq_num_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.success = iprot.readI64();
+          struct.setSuccessIsSet(true);
+        }
+      }
+    }
+
+  }
+
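(For orientation while reading the generated class above: the sketch below is not part of this patch. It shows how the __isset_bitfield bookkeeping behaves for the primitive success field. The demo class name is invented, and since check_hms_seq_num_result is a static nested class of the generated service, an import of that enclosing class, whose name falls outside this excerpt, is assumed.)

    // Hypothetical demo, not part of the commit; assumes the enclosing
    // generated service class is imported so check_hms_seq_num_result resolves.
    public class CheckSeqNumResultDemo {
      public static void main(String[] args) {
        check_hms_seq_num_result res = new check_hms_seq_num_result();
        System.out.println(res.isSetSuccess()); // false: bit __SUCCESS_ISSET_ID is clear
        res.setSuccess(42L);                    // stores the value and sets the bit
        System.out.println(res.getSuccess());   // 42
        res.unsetSuccess();                     // clears only the bit; clear() also zeroes the value
        System.out.println(res.isSetSuccess()); // false
      }
    }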
+  public static class get_all_authz_updates_from_args implements org.apache.thrift.TBase<get_all_authz_updates_from_args, get_all_authz_updates_from_args._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_authz_updates_from_args");
+
+    private static final org.apache.thrift.protocol.TField PERM_SEQ_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("permSeqNum", org.apache.thrift.protocol.TType.I64, (short)1);
+    private static final org.apache.thrift.protocol.TField PATH_SEQ_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("pathSeqNum", org.apache.thrift.protocol.TType.I64, (short)2);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_all_authz_updates_from_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_all_authz_updates_from_argsTupleSchemeFactory());
+    }
+
+    private long permSeqNum; // required
+    private long pathSeqNum; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      PERM_SEQ_NUM((short)1, "permSeqNum"),
+      PATH_SEQ_NUM((short)2, "pathSeqNum");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // PERM_SEQ_NUM
+            return PERM_SEQ_NUM;
+          case 2: // PATH_SEQ_NUM
+            return PATH_SEQ_NUM;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    private static final int __PERMSEQNUM_ISSET_ID = 0;
+    private static final int __PATHSEQNUM_ISSET_ID = 1;
+    private byte __isset_bitfield = 0;
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.PERM_SEQ_NUM, new org.apache.thrift.meta_data.FieldMetaData("permSeqNum", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+      tmpMap.put(_Fields.PATH_SEQ_NUM, new org.apache.thrift.meta_data.FieldMetaData("pathSeqNum", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_authz_updates_from_args.class, metaDataMap);
+    }
+
+    public get_all_authz_updates_from_args() {
+    }
+
+    public get_all_authz_updates_from_args(
+      long permSeqNum,
+      long pathSeqNum)
+    {
+      this();
+      this.permSeqNum = permSeqNum;
+      setPermSeqNumIsSet(true);
+      this.pathSeqNum = pathSeqNum;
+      setPathSeqNumIsSet(true);
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_all_authz_updates_from_args(get_all_authz_updates_from_args other) {
+      __isset_bitfield = other.__isset_bitfield;
+      this.permSeqNum = other.permSeqNum;
+      this.pathSeqNum = other.pathSeqNum;
+    }
+
+    public get_all_authz_updates_from_args deepCopy() {
+      return new get_all_authz_updates_from_args(this);
+    }
+
+    @Override
+    public void clear() {
+      setPermSeqNumIsSet(false);
+      this.permSeqNum = 0;
+      setPathSeqNumIsSet(false);
+      this.pathSeqNum = 0;
+    }
+
+    public long getPermSeqNum() {
+      return this.permSeqNum;
+    }
+
+    public void setPermSeqNum(long permSeqNum) {
+      this.permSeqNum = permSeqNum;
+      setPermSeqNumIsSet(true);
+    }
+
+    public void unsetPermSeqNum() {
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PERMSEQNUM_ISSET_ID);
+    }
+
+    /** Returns true if field permSeqNum is set (has been assigned a value) and false otherwise */
+    public boolean isSetPermSeqNum() {
+      return EncodingUtils.testBit(__isset_bitfield, __PERMSEQNUM_ISSET_ID);
+    }
+
+    public void setPermSeqNumIsSet(boolean value) {
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PERMSEQNUM_ISSET_ID, value);
+    }
+
+    public long getPathSeqNum() {
+      return this.pathSeqNum;
+    }
+
+    public void setPathSeqNum(long pathSeqNum) {
+      this.pathSeqNum = pathSeqNum;
+      setPathSeqNumIsSet(true);
+    }
+
+    public void unsetPathSeqNum() {
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PATHSEQNUM_ISSET_ID);
+    }
+
+    /** Returns true if field pathSeqNum is set (has been assigned a value) and false otherwise */
+    public boolean isSetPathSeqNum() {
+      return EncodingUtils.testBit(__isset_bitfield, __PATHSEQNUM_ISSET_ID);
+    }
+
+    public void setPathSeqNumIsSet(boolean value) {
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PATHSEQNUM_ISSET_ID, value);
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case PERM_SEQ_NUM:
+        if (value == null) {
+          unsetPermSeqNum();
+        } else {
+          setPermSeqNum((Long)value);
+        }
+        break;
+
+      case PATH_SEQ_NUM:
+        if (value == null) {
+          unsetPathSeqNum();
+        } else {
+          setPathSeqNum((Long)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case PERM_SEQ_NUM:
+        return Long.valueOf(getPermSeqNum());
+
+      case PATH_SEQ_NUM:
+        return Long.valueOf(getPathSeqNum());
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case PERM_SEQ_NUM:
+        return isSetPermSeqNum();
+      case PATH_SEQ_NUM:
+        return isSetPathSeqNum();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof get_all_authz_updates_from_args)
+        return this.equals((get_all_authz_updates_from_args)that);
+      return false;
+    }
+
+    public boolean equals(get_all_authz_updates_from_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_permSeqNum = true;
+      boolean that_present_permSeqNum = true;
+      if (this_present_permSeqNum || that_present_permSeqNum) {
+        if (!(this_present_permSeqNum && that_present_permSeqNum))
+          return false;
+        if (this.permSeqNum != that.permSeqNum)
+          return false;
+      }
+
+      boolean this_present_pathSeqNum = true;
+      boolean that_present_pathSeqNum = true;
+      if (this_present_pathSeqNum || that_present_pathSeqNum) {
+        if (!(this_present_pathSeqNum && that_present_pathSeqNum))
+          return false;
+        if (this.pathSeqNum != that.pathSeqNum)
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      HashCodeBuilder builder = new HashCodeBuilder();
+
+      boolean present_permSeqNum = true;
+      builder.append(present_permSeqNum);
+      if (present_permSeqNum)
+        builder.append(permSeqNum);
+
+      boolean present_pathSeqNum = true;
+      builder.append(present_pathSeqNum);
+      if (present_pathSeqNum)
+        builder.append(pathSeqNum);
+
+      return builder.toHashCode();
+    }
+
+    public int compareTo(get_all_authz_updates_from_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      get_all_authz_updates_from_args typedOther = (get_all_authz_updates_from_args)other;
+
+      lastComparison = Boolean.valueOf(isSetPermSeqNum()).compareTo(typedOther.isSetPermSeqNum());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetPermSeqNum()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.permSeqNum, typedOther.permSeqNum);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetPathSeqNum()).compareTo(typedOther.isSetPathSeqNum());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetPathSeqNum()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pathSeqNum, typedOther.pathSeqNum);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_all_authz_updates_from_args(");
+      boolean first = true;
+
+      sb.append("permSeqNum:");
+      sb.append(this.permSeqNum);
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("pathSeqNum:");
+      sb.append(this.pathSeqNum);
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+        __isset_bitfield = 0;
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_all_authz_updates_from_argsStandardSchemeFactory implements SchemeFactory {
+      public get_all_authz_updates_from_argsStandardScheme getScheme() {
+        return new get_all_authz_updates_from_argsStandardScheme();
+      }
+    }
+
+    private static class get_all_authz_updates_from_argsStandardScheme extends StandardScheme<get_all_authz_updates_from_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_authz_updates_from_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // PERM_SEQ_NUM
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.permSeqNum = iprot.readI64();
+                struct.setPermSeqNumIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // PATH_SEQ_NUM
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.pathSeqNum = iprot.readI64();
+                struct.setPathSeqNumIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_authz_updates_from_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldBegin(PERM_SEQ_NUM_FIELD_DESC);
+        oprot.writeI64(struct.permSeqNum);
+        oprot.writeFieldEnd();
+        oprot.writeFieldBegin(PATH_SEQ_NUM_FIELD_DESC);
+        oprot.writeI64(struct.pathSeqNum);
+        oprot.writeFieldEnd();
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_all_authz_updates_from_argsTupleSchemeFactory implements SchemeFactory {
+      public get_all_authz_updates_from_argsTupleScheme getScheme() {
+        return new get_all_authz_updates_from_argsTupleScheme();
+      }
+    }
+
+    private static class get_all_authz_updates_from_argsTupleScheme extends TupleScheme<get_all_authz_updates_from_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_all_authz_updates_from_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetPermSeqNum()) {
+          optionals.set(0);
+        }
+        if (struct.isSetPathSeqNum()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.isSetPermSeqNum()) {
+          oprot.writeI64(struct.permSeqNum);
+        }
+        if (struct.isSetPathSeqNum()) {
+          oprot.writeI64(struct.pathSeqNum);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_all_authz_updates_from_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.permSeqNum = iprot.readI64();
+          struct.setPermSeqNumIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.pathSeqNum = iprot.readI64();
+          struct.setPathSeqNumIsSet(true);
+        }
+      }
+    }
+
+  }
+
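(Again not part of the patch: a minimal round-trip sketch for the args struct above, using the stock libthrift TSerializer/TDeserializer with the same TCompactProtocol that the generated writeObject/readObject rely on. The demo class name is invented, and the nested args type is assumed to be imported from its enclosing generated service class.)

    // Hypothetical demo, not part of the commit.
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class AuthzUpdateArgsRoundTrip {
      public static void main(String[] args) throws Exception {
        get_all_authz_updates_from_args req = new get_all_authz_updates_from_args(7L, 9L);
        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(req);
        get_all_authz_updates_from_args copy = new get_all_authz_updates_from_args();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        System.out.println(copy.getPermSeqNum() + " " + copy.getPathSeqNum()); // 7 9
        System.out.println(req.equals(copy));                                  // true
      }
    }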
+  public static class get_all_authz_updates_from_result implements org.apache.thrift.TBase<get_all_authz_updates_from_result, get_all_authz_updates_from_result._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_authz_updates_from_result");
+
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_all_authz_updates_from_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_all_authz_updates_from_resultTupleSchemeFactory());
+    }
+
+    private TAuthzUpdateResponse success; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      SUCCESS((short)0, "success");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAuthzUpdateResponse.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_authz_updates_from_result.class, metaDataMap);
+    }
+
+    public get_all_authz_updates_from_result() {
+    }
+
+    public get_all_authz_updates_from_result(
+      TAuthzUpdateResponse success)
+    {
+      this();
+      this.success = success;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_all_authz_updates_from_result(get_all_authz_updates_from_result other) {
+      if (other.isSetSuccess()) {
+        this.success = new TAuthzUpdateResponse(other.success);
+      }
+    }
+
+    public get_all_authz_updates_from_result deepCopy() {
+      return new get_all_authz_updates_from_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.success = null;
+    }
+
+    public TAuthzUpdateResponse getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(TAuthzUpdateResponse success) {
+      this.success = success;
+    }
+
+    public void unsetSuccess() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((TAuthzUpdateResponse)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return getSuccess();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case SUCCESS:
+        return isSetSuccess();
+      }
+      throw new IllegalStateException();

<TRUNCATED>

[5/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java
new file mode 100644
index 0000000..76720b9
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPrivilegeChanges.java
@@ -0,0 +1,713 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TPrivilegeChanges implements org.apache.thrift.TBase<TPrivilegeChanges, TPrivilegeChanges._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPrivilegeChanges");
+
+  private static final org.apache.thrift.protocol.TField AUTHZ_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("authzObj", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ADD_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("addPrivileges", org.apache.thrift.protocol.TType.MAP, (short)2);
+  private static final org.apache.thrift.protocol.TField DEL_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("delPrivileges", org.apache.thrift.protocol.TType.MAP, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TPrivilegeChangesStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TPrivilegeChangesTupleSchemeFactory());
+  }
+
+  private String authzObj; // required
+  private Map<String,String> addPrivileges; // required
+  private Map<String,String> delPrivileges; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    AUTHZ_OBJ((short)1, "authzObj"),
+    ADD_PRIVILEGES((short)2, "addPrivileges"),
+    DEL_PRIVILEGES((short)3, "delPrivileges");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // AUTHZ_OBJ
+          return AUTHZ_OBJ;
+        case 2: // ADD_PRIVILEGES
+          return ADD_PRIVILEGES;
+        case 3: // DEL_PRIVILEGES
+          return DEL_PRIVILEGES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.AUTHZ_OBJ, new org.apache.thrift.meta_data.FieldMetaData("authzObj", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ADD_PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("addPrivileges", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.DEL_PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("delPrivileges", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPrivilegeChanges.class, metaDataMap);
+  }
+
+  public TPrivilegeChanges() {
+  }
+
+  public TPrivilegeChanges(
+    String authzObj,
+    Map<String,String> addPrivileges,
+    Map<String,String> delPrivileges)
+  {
+    this();
+    this.authzObj = authzObj;
+    this.addPrivileges = addPrivileges;
+    this.delPrivileges = delPrivileges;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TPrivilegeChanges(TPrivilegeChanges other) {
+    if (other.isSetAuthzObj()) {
+      this.authzObj = other.authzObj;
+    }
+    if (other.isSetAddPrivileges()) {
+      Map<String,String> __this__addPrivileges = new HashMap<String,String>();
+      for (Map.Entry<String, String> other_element : other.addPrivileges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        String other_element_value = other_element.getValue();
+
+        String __this__addPrivileges_copy_key = other_element_key;
+
+        String __this__addPrivileges_copy_value = other_element_value;
+
+        __this__addPrivileges.put(__this__addPrivileges_copy_key, __this__addPrivileges_copy_value);
+      }
+      this.addPrivileges = __this__addPrivileges;
+    }
+    if (other.isSetDelPrivileges()) {
+      Map<String,String> __this__delPrivileges = new HashMap<String,String>();
+      for (Map.Entry<String, String> other_element : other.delPrivileges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        String other_element_value = other_element.getValue();
+
+        String __this__delPrivileges_copy_key = other_element_key;
+
+        String __this__delPrivileges_copy_value = other_element_value;
+
+        __this__delPrivileges.put(__this__delPrivileges_copy_key, __this__delPrivileges_copy_value);
+      }
+      this.delPrivileges = __this__delPrivileges;
+    }
+  }
+
+  public TPrivilegeChanges deepCopy() {
+    return new TPrivilegeChanges(this);
+  }
+
+  @Override
+  public void clear() {
+    this.authzObj = null;
+    this.addPrivileges = null;
+    this.delPrivileges = null;
+  }
+
+  public String getAuthzObj() {
+    return this.authzObj;
+  }
+
+  public void setAuthzObj(String authzObj) {
+    this.authzObj = authzObj;
+  }
+
+  public void unsetAuthzObj() {
+    this.authzObj = null;
+  }
+
+  /** Returns true if field authzObj is set (has been assigned a value) and false otherwise */
+  public boolean isSetAuthzObj() {
+    return this.authzObj != null;
+  }
+
+  public void setAuthzObjIsSet(boolean value) {
+    if (!value) {
+      this.authzObj = null;
+    }
+  }
+
+  public int getAddPrivilegesSize() {
+    return (this.addPrivileges == null) ? 0 : this.addPrivileges.size();
+  }
+
+  public void putToAddPrivileges(String key, String val) {
+    if (this.addPrivileges == null) {
+      this.addPrivileges = new HashMap<String,String>();
+    }
+    this.addPrivileges.put(key, val);
+  }
+
+  public Map<String,String> getAddPrivileges() {
+    return this.addPrivileges;
+  }
+
+  public void setAddPrivileges(Map<String,String> addPrivileges) {
+    this.addPrivileges = addPrivileges;
+  }
+
+  public void unsetAddPrivileges() {
+    this.addPrivileges = null;
+  }
+
+  /** Returns true if field addPrivileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetAddPrivileges() {
+    return this.addPrivileges != null;
+  }
+
+  public void setAddPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.addPrivileges = null;
+    }
+  }
+
+  public int getDelPrivilegesSize() {
+    return (this.delPrivileges == null) ? 0 : this.delPrivileges.size();
+  }
+
+  public void putToDelPrivileges(String key, String val) {
+    if (this.delPrivileges == null) {
+      this.delPrivileges = new HashMap<String,String>();
+    }
+    this.delPrivileges.put(key, val);
+  }
+
+  public Map<String,String> getDelPrivileges() {
+    return this.delPrivileges;
+  }
+
+  public void setDelPrivileges(Map<String,String> delPrivileges) {
+    this.delPrivileges = delPrivileges;
+  }
+
+  public void unsetDelPrivileges() {
+    this.delPrivileges = null;
+  }
+
+  /** Returns true if field delPrivileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetDelPrivileges() {
+    return this.delPrivileges != null;
+  }
+
+  public void setDelPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.delPrivileges = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case AUTHZ_OBJ:
+      if (value == null) {
+        unsetAuthzObj();
+      } else {
+        setAuthzObj((String)value);
+      }
+      break;
+
+    case ADD_PRIVILEGES:
+      if (value == null) {
+        unsetAddPrivileges();
+      } else {
+        setAddPrivileges((Map<String,String>)value);
+      }
+      break;
+
+    case DEL_PRIVILEGES:
+      if (value == null) {
+        unsetDelPrivileges();
+      } else {
+        setDelPrivileges((Map<String,String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case AUTHZ_OBJ:
+      return getAuthzObj();
+
+    case ADD_PRIVILEGES:
+      return getAddPrivileges();
+
+    case DEL_PRIVILEGES:
+      return getDelPrivileges();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case AUTHZ_OBJ:
+      return isSetAuthzObj();
+    case ADD_PRIVILEGES:
+      return isSetAddPrivileges();
+    case DEL_PRIVILEGES:
+      return isSetDelPrivileges();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TPrivilegeChanges)
+      return this.equals((TPrivilegeChanges)that);
+    return false;
+  }
+
+  public boolean equals(TPrivilegeChanges that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_authzObj = true && this.isSetAuthzObj();
+    boolean that_present_authzObj = true && that.isSetAuthzObj();
+    if (this_present_authzObj || that_present_authzObj) {
+      if (!(this_present_authzObj && that_present_authzObj))
+        return false;
+      if (!this.authzObj.equals(that.authzObj))
+        return false;
+    }
+
+    boolean this_present_addPrivileges = true && this.isSetAddPrivileges();
+    boolean that_present_addPrivileges = true && that.isSetAddPrivileges();
+    if (this_present_addPrivileges || that_present_addPrivileges) {
+      if (!(this_present_addPrivileges && that_present_addPrivileges))
+        return false;
+      if (!this.addPrivileges.equals(that.addPrivileges))
+        return false;
+    }
+
+    boolean this_present_delPrivileges = true && this.isSetDelPrivileges();
+    boolean that_present_delPrivileges = true && that.isSetDelPrivileges();
+    if (this_present_delPrivileges || that_present_delPrivileges) {
+      if (!(this_present_delPrivileges && that_present_delPrivileges))
+        return false;
+      if (!this.delPrivileges.equals(that.delPrivileges))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_authzObj = true && (isSetAuthzObj());
+    builder.append(present_authzObj);
+    if (present_authzObj)
+      builder.append(authzObj);
+
+    boolean present_addPrivileges = true && (isSetAddPrivileges());
+    builder.append(present_addPrivileges);
+    if (present_addPrivileges)
+      builder.append(addPrivileges);
+
+    boolean present_delPrivileges = true && (isSetDelPrivileges());
+    builder.append(present_delPrivileges);
+    if (present_delPrivileges)
+      builder.append(delPrivileges);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TPrivilegeChanges other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TPrivilegeChanges typedOther = (TPrivilegeChanges)other;
+
+    lastComparison = Boolean.valueOf(isSetAuthzObj()).compareTo(typedOther.isSetAuthzObj());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAuthzObj()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authzObj, typedOther.authzObj);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAddPrivileges()).compareTo(typedOther.isSetAddPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAddPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.addPrivileges, typedOther.addPrivileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDelPrivileges()).compareTo(typedOther.isSetDelPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDelPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delPrivileges, typedOther.delPrivileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TPrivilegeChanges(");
+    boolean first = true;
+
+    sb.append("authzObj:");
+    if (this.authzObj == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.authzObj);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("addPrivileges:");
+    if (this.addPrivileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.addPrivileges);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("delPrivileges:");
+    if (this.delPrivileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.delPrivileges);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetAuthzObj()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'authzObj' is unset! Struct:" + toString());
+    }
+
+    if (!isSetAddPrivileges()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'addPrivileges' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDelPrivileges()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'delPrivileges' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TPrivilegeChangesStandardSchemeFactory implements SchemeFactory {
+    public TPrivilegeChangesStandardScheme getScheme() {
+      return new TPrivilegeChangesStandardScheme();
+    }
+  }
+
+  private static class TPrivilegeChangesStandardScheme extends StandardScheme<TPrivilegeChanges> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TPrivilegeChanges struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // AUTHZ_OBJ
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.authzObj = iprot.readString();
+              struct.setAuthzObjIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ADD_PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin();
+                struct.addPrivileges = new HashMap<String,String>(2*_map58.size);
+                for (int _i59 = 0; _i59 < _map58.size; ++_i59)
+                {
+                  String _key60; // required
+                  String _val61; // required
+                  _key60 = iprot.readString();
+                  _val61 = iprot.readString();
+                  struct.addPrivileges.put(_key60, _val61);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setAddPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DEL_PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map62 = iprot.readMapBegin();
+                struct.delPrivileges = new HashMap<String,String>(2*_map62.size);
+                for (int _i63 = 0; _i63 < _map62.size; ++_i63)
+                {
+                  String _key64; // required
+                  String _val65; // required
+                  _key64 = iprot.readString();
+                  _val65 = iprot.readString();
+                  struct.delPrivileges.put(_key64, _val65);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setDelPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TPrivilegeChanges struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.authzObj != null) {
+        oprot.writeFieldBegin(AUTHZ_OBJ_FIELD_DESC);
+        oprot.writeString(struct.authzObj);
+        oprot.writeFieldEnd();
+      }
+      if (struct.addPrivileges != null) {
+        oprot.writeFieldBegin(ADD_PRIVILEGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.addPrivileges.size()));
+          for (Map.Entry<String, String> _iter66 : struct.addPrivileges.entrySet())
+          {
+            oprot.writeString(_iter66.getKey());
+            oprot.writeString(_iter66.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.delPrivileges != null) {
+        oprot.writeFieldBegin(DEL_PRIVILEGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.delPrivileges.size()));
+          for (Map.Entry<String, String> _iter67 : struct.delPrivileges.entrySet())
+          {
+            oprot.writeString(_iter67.getKey());
+            oprot.writeString(_iter67.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TPrivilegeChangesTupleSchemeFactory implements SchemeFactory {
+    public TPrivilegeChangesTupleScheme getScheme() {
+      return new TPrivilegeChangesTupleScheme();
+    }
+  }
+
+  private static class TPrivilegeChangesTupleScheme extends TupleScheme<TPrivilegeChanges> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TPrivilegeChanges struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.authzObj);
+      {
+        oprot.writeI32(struct.addPrivileges.size());
+        for (Map.Entry<String, String> _iter68 : struct.addPrivileges.entrySet())
+        {
+          oprot.writeString(_iter68.getKey());
+          oprot.writeString(_iter68.getValue());
+        }
+      }
+      {
+        oprot.writeI32(struct.delPrivileges.size());
+        for (Map.Entry<String, String> _iter69 : struct.delPrivileges.entrySet())
+        {
+          oprot.writeString(_iter69.getKey());
+          oprot.writeString(_iter69.getValue());
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TPrivilegeChanges struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.authzObj = iprot.readString();
+      struct.setAuthzObjIsSet(true);
+      {
+        org.apache.thrift.protocol.TMap _map70 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.addPrivileges = new HashMap<String,String>(2*_map70.size);
+        for (int _i71 = 0; _i71 < _map70.size; ++_i71)
+        {
+          String _key72; // required
+          String _val73; // required
+          _key72 = iprot.readString();
+          _val73 = iprot.readString();
+          struct.addPrivileges.put(_key72, _val73);
+        }
+      }
+      struct.setAddPrivilegesIsSet(true);
+      {
+        org.apache.thrift.protocol.TMap _map74 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.delPrivileges = new HashMap<String,String>(2*_map74.size);
+        for (int _i75 = 0; _i75 < _map74.size; ++_i75)
+        {
+          String _key76; // required
+          String _val77; // required
+          _key76 = iprot.readString();
+          _val77 = iprot.readString();
+          struct.delPrivileges.put(_key76, _val77);
+        }
+      }
+      struct.setDelPrivilegesIsSet(true);
+    }
+  }
+
+}
+
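(One more illustrative sketch, not part of the patch: TPrivilegeChanges marks all three fields REQUIRED, so the generated validate() rejects a partially built instance. The demo class name, the authzObj value, and the role-to-action reading of the map entries are illustrative assumptions; the generated code itself only fixes the types.)

    // Hypothetical demo, not part of the commit.
    import java.util.HashMap;
    import org.apache.sentry.hdfs.service.thrift.TPrivilegeChanges;
    import org.apache.thrift.TException;

    public class PrivilegeChangesValidateDemo {
      public static void main(String[] args) {
        TPrivilegeChanges delta = new TPrivilegeChanges();
        delta.setAuthzObj("db1.tbl1");               // illustrative authz object name
        delta.putToAddPrivileges("role1", "select"); // lazily creates the map; semantics illustrative
        try {
          delta.validate();                          // delPrivileges is still unset
        } catch (TException e) {
          System.out.println(e.getMessage());        // Required field 'delPrivileges' is unset! ...
        }
        delta.setDelPrivileges(new HashMap<String, String>());
        // all three required fields now set; validate() passes
      }
    }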

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java
new file mode 100644
index 0000000..87ef02d
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TRoleChanges.java
@@ -0,0 +1,691 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TRoleChanges implements org.apache.thrift.TBase<TRoleChanges, TRoleChanges._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRoleChanges");
+
+  private static final org.apache.thrift.protocol.TField ROLE_FIELD_DESC = new org.apache.thrift.protocol.TField("role", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ADD_GROUPS_FIELD_DESC = new org.apache.thrift.protocol.TField("addGroups", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField DEL_GROUPS_FIELD_DESC = new org.apache.thrift.protocol.TField("delGroups", org.apache.thrift.protocol.TType.LIST, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TRoleChangesStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TRoleChangesTupleSchemeFactory());
+  }
+
+  private String role; // required
+  private List<String> addGroups; // required
+  private List<String> delGroups; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ROLE((short)1, "role"),
+    ADD_GROUPS((short)2, "addGroups"),
+    DEL_GROUPS((short)3, "delGroups");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ROLE
+          return ROLE;
+        case 2: // ADD_GROUPS
+          return ADD_GROUPS;
+        case 3: // DEL_GROUPS
+          return DEL_GROUPS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ROLE, new org.apache.thrift.meta_data.FieldMetaData("role", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ADD_GROUPS, new org.apache.thrift.meta_data.FieldMetaData("addGroups", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.DEL_GROUPS, new org.apache.thrift.meta_data.FieldMetaData("delGroups", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRoleChanges.class, metaDataMap);
+  }
+
+  public TRoleChanges() {
+  }
+
+  public TRoleChanges(
+    String role,
+    List<String> addGroups,
+    List<String> delGroups)
+  {
+    this();
+    this.role = role;
+    this.addGroups = addGroups;
+    this.delGroups = delGroups;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TRoleChanges(TRoleChanges other) {
+    if (other.isSetRole()) {
+      this.role = other.role;
+    }
+    if (other.isSetAddGroups()) {
+      List<String> __this__addGroups = new ArrayList<String>();
+      for (String other_element : other.addGroups) {
+        __this__addGroups.add(other_element);
+      }
+      this.addGroups = __this__addGroups;
+    }
+    if (other.isSetDelGroups()) {
+      List<String> __this__delGroups = new ArrayList<String>();
+      for (String other_element : other.delGroups) {
+        __this__delGroups.add(other_element);
+      }
+      this.delGroups = __this__delGroups;
+    }
+  }
+
+  public TRoleChanges deepCopy() {
+    return new TRoleChanges(this);
+  }
+
+  @Override
+  public void clear() {
+    this.role = null;
+    this.addGroups = null;
+    this.delGroups = null;
+  }
+
+  public String getRole() {
+    return this.role;
+  }
+
+  public void setRole(String role) {
+    this.role = role;
+  }
+
+  public void unsetRole() {
+    this.role = null;
+  }
+
+  /** Returns true if field role is set (has been assigned a value) and false otherwise */
+  public boolean isSetRole() {
+    return this.role != null;
+  }
+
+  public void setRoleIsSet(boolean value) {
+    if (!value) {
+      this.role = null;
+    }
+  }
+
+  public int getAddGroupsSize() {
+    return (this.addGroups == null) ? 0 : this.addGroups.size();
+  }
+
+  public java.util.Iterator<String> getAddGroupsIterator() {
+    return (this.addGroups == null) ? null : this.addGroups.iterator();
+  }
+
+  public void addToAddGroups(String elem) {
+    if (this.addGroups == null) {
+      this.addGroups = new ArrayList<String>();
+    }
+    this.addGroups.add(elem);
+  }
+
+  public List<String> getAddGroups() {
+    return this.addGroups;
+  }
+
+  public void setAddGroups(List<String> addGroups) {
+    this.addGroups = addGroups;
+  }
+
+  public void unsetAddGroups() {
+    this.addGroups = null;
+  }
+
+  /** Returns true if field addGroups is set (has been assigned a value) and false otherwise */
+  public boolean isSetAddGroups() {
+    return this.addGroups != null;
+  }
+
+  public void setAddGroupsIsSet(boolean value) {
+    if (!value) {
+      this.addGroups = null;
+    }
+  }
+
+  public int getDelGroupsSize() {
+    return (this.delGroups == null) ? 0 : this.delGroups.size();
+  }
+
+  public java.util.Iterator<String> getDelGroupsIterator() {
+    return (this.delGroups == null) ? null : this.delGroups.iterator();
+  }
+
+  public void addToDelGroups(String elem) {
+    if (this.delGroups == null) {
+      this.delGroups = new ArrayList<String>();
+    }
+    this.delGroups.add(elem);
+  }
+
+  public List<String> getDelGroups() {
+    return this.delGroups;
+  }
+
+  public void setDelGroups(List<String> delGroups) {
+    this.delGroups = delGroups;
+  }
+
+  public void unsetDelGroups() {
+    this.delGroups = null;
+  }
+
+  /** Returns true if field delGroups is set (has been assigned a value) and false otherwise */
+  public boolean isSetDelGroups() {
+    return this.delGroups != null;
+  }
+
+  public void setDelGroupsIsSet(boolean value) {
+    if (!value) {
+      this.delGroups = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ROLE:
+      if (value == null) {
+        unsetRole();
+      } else {
+        setRole((String)value);
+      }
+      break;
+
+    case ADD_GROUPS:
+      if (value == null) {
+        unsetAddGroups();
+      } else {
+        setAddGroups((List<String>)value);
+      }
+      break;
+
+    case DEL_GROUPS:
+      if (value == null) {
+        unsetDelGroups();
+      } else {
+        setDelGroups((List<String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ROLE:
+      return getRole();
+
+    case ADD_GROUPS:
+      return getAddGroups();
+
+    case DEL_GROUPS:
+      return getDelGroups();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ROLE:
+      return isSetRole();
+    case ADD_GROUPS:
+      return isSetAddGroups();
+    case DEL_GROUPS:
+      return isSetDelGroups();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TRoleChanges)
+      return this.equals((TRoleChanges)that);
+    return false;
+  }
+
+  public boolean equals(TRoleChanges that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_role = true && this.isSetRole();
+    boolean that_present_role = true && that.isSetRole();
+    if (this_present_role || that_present_role) {
+      if (!(this_present_role && that_present_role))
+        return false;
+      if (!this.role.equals(that.role))
+        return false;
+    }
+
+    boolean this_present_addGroups = true && this.isSetAddGroups();
+    boolean that_present_addGroups = true && that.isSetAddGroups();
+    if (this_present_addGroups || that_present_addGroups) {
+      if (!(this_present_addGroups && that_present_addGroups))
+        return false;
+      if (!this.addGroups.equals(that.addGroups))
+        return false;
+    }
+
+    boolean this_present_delGroups = true && this.isSetDelGroups();
+    boolean that_present_delGroups = true && that.isSetDelGroups();
+    if (this_present_delGroups || that_present_delGroups) {
+      if (!(this_present_delGroups && that_present_delGroups))
+        return false;
+      if (!this.delGroups.equals(that.delGroups))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_role = true && (isSetRole());
+    builder.append(present_role);
+    if (present_role)
+      builder.append(role);
+
+    boolean present_addGroups = true && (isSetAddGroups());
+    builder.append(present_addGroups);
+    if (present_addGroups)
+      builder.append(addGroups);
+
+    boolean present_delGroups = true && (isSetDelGroups());
+    builder.append(present_delGroups);
+    if (present_delGroups)
+      builder.append(delGroups);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TRoleChanges other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TRoleChanges typedOther = (TRoleChanges)other;
+
+    lastComparison = Boolean.valueOf(isSetRole()).compareTo(typedOther.isSetRole());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRole()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.role, typedOther.role);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAddGroups()).compareTo(typedOther.isSetAddGroups());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAddGroups()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.addGroups, typedOther.addGroups);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDelGroups()).compareTo(typedOther.isSetDelGroups());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDelGroups()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delGroups, typedOther.delGroups);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TRoleChanges(");
+    boolean first = true;
+
+    sb.append("role:");
+    if (this.role == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.role);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("addGroups:");
+    if (this.addGroups == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.addGroups);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("delGroups:");
+    if (this.delGroups == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.delGroups);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetRole()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'role' is unset! Struct:" + toString());
+    }
+
+    if (!isSetAddGroups()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'addGroups' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDelGroups()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'delGroups' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TRoleChangesStandardSchemeFactory implements SchemeFactory {
+    public TRoleChangesStandardScheme getScheme() {
+      return new TRoleChangesStandardScheme();
+    }
+  }
+
+  private static class TRoleChangesStandardScheme extends StandardScheme<TRoleChanges> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TRoleChanges struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ROLE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.role = iprot.readString();
+              struct.setRoleIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ADD_GROUPS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list78 = iprot.readListBegin();
+                struct.addGroups = new ArrayList<String>(_list78.size);
+                for (int _i79 = 0; _i79 < _list78.size; ++_i79)
+                {
+                  String _elem80; // required
+                  _elem80 = iprot.readString();
+                  struct.addGroups.add(_elem80);
+                }
+                iprot.readListEnd();
+              }
+              struct.setAddGroupsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DEL_GROUPS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list81 = iprot.readListBegin();
+                struct.delGroups = new ArrayList<String>(_list81.size);
+                for (int _i82 = 0; _i82 < _list81.size; ++_i82)
+                {
+                  String _elem83; // required
+                  _elem83 = iprot.readString();
+                  struct.delGroups.add(_elem83);
+                }
+                iprot.readListEnd();
+              }
+              struct.setDelGroupsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TRoleChanges struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.role != null) {
+        oprot.writeFieldBegin(ROLE_FIELD_DESC);
+        oprot.writeString(struct.role);
+        oprot.writeFieldEnd();
+      }
+      if (struct.addGroups != null) {
+        oprot.writeFieldBegin(ADD_GROUPS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.addGroups.size()));
+          for (String _iter84 : struct.addGroups)
+          {
+            oprot.writeString(_iter84);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.delGroups != null) {
+        oprot.writeFieldBegin(DEL_GROUPS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.delGroups.size()));
+          for (String _iter85 : struct.delGroups)
+          {
+            oprot.writeString(_iter85);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TRoleChangesTupleSchemeFactory implements SchemeFactory {
+    public TRoleChangesTupleScheme getScheme() {
+      return new TRoleChangesTupleScheme();
+    }
+  }
+
+  private static class TRoleChangesTupleScheme extends TupleScheme<TRoleChanges> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TRoleChanges struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.role);
+      {
+        oprot.writeI32(struct.addGroups.size());
+        for (String _iter86 : struct.addGroups)
+        {
+          oprot.writeString(_iter86);
+        }
+      }
+      {
+        oprot.writeI32(struct.delGroups.size());
+        for (String _iter87 : struct.delGroups)
+        {
+          oprot.writeString(_iter87);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TRoleChanges struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.role = iprot.readString();
+      struct.setRoleIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list88 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.addGroups = new ArrayList<String>(_list88.size);
+        for (int _i89 = 0; _i89 < _list88.size; ++_i89)
+        {
+          String _elem90; // required
+          _elem90 = iprot.readString();
+          struct.addGroups.add(_elem90);
+        }
+      }
+      struct.setAddGroupsIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.delGroups = new ArrayList<String>(_list91.size);
+        for (int _i92 = 0; _i92 < _list91.size; ++_i92)
+        {
+          String _elem93; // required
+          _elem93 = iprot.readString();
+          struct.delGroups.add(_elem93);
+        }
+      }
+      struct.setDelGroupsIsSet(true);
+    }
+  }
+
+}
+
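
For reviewers: the TupleScheme above omits field headers entirely and writes
only raw values (the role, then each list prefixed by its size), so both ends
must agree on the struct layout. A minimal round-trip sketch using the stock
Thrift serializer utilities; the role and group names are made up for
illustration:

    import java.util.Arrays;
    import org.apache.sentry.hdfs.service.thrift.TRoleChanges;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class TRoleChangesRoundTrip {
      public static void main(String[] args) throws Exception {
        TRoleChanges changes = new TRoleChanges("analyst_role",
            Arrays.asList("etl_group"), Arrays.asList("legacy_group"));

        // Serialize and deserialize with the compact protocol, the same
        // protocol the writeObject/readObject hooks above use.
        TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
        byte[] wire = ser.serialize(changes);

        TRoleChanges copy = new TRoleChanges();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        assert changes.equals(copy);
      }
    }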

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java
new file mode 100644
index 0000000..ba16f4a
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPaths.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+/**
+ * A public interface of the fundamental APIs exposed by the implementing
+ * data structure. The primary client of this interface is the Namenode
+ * plugin.
+ */
+public interface AuthzPaths {
+
+  /**
+   * Check if a Path belongs to the configured prefix set
+   * @param pathElements : A path split into segments
+   * @return Is Path under configured prefix
+   */
+  public boolean isUnderPrefix(String[] pathElements);
+
+  /**
+   * Returns the authorizable Object (database/table) associated with this path.
+   * Unlike {@link #findAuthzObjectExactMatch(String[])}, if no match is
+   * found, it will return the first ancestor that has an associated
+   * authorizable object.
+   * @param pathElements : A path split into segments
+   * @return The authzObject associated with this path
+   */
+  public String findAuthzObject(String[] pathElements);
+
+  /**
+   * Returns the authorizable Object (database/table) associated with this path.
+   * @param pathElements : A path split into segments
+   * @return The authzObject associated with this path
+   */
+  public String findAuthzObjectExactMatch(String[] pathElements);
+
+  /**
+   * Return a dumper that may provide a more optimized over-the-wire
+   * representation of the internal data structures.
+   * @return A dumper over this instance's internal state
+   */
+  public AuthzPathsDumper<? extends AuthzPaths> getPathsDump();
+
+}
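
A sketch of how a caller such as the NameNode plugin is expected to drive this
interface: HDFS hands the plugin a path already split into segments. The
prefix and path values here are hypothetical, and 'authzPaths' stands for any
implementation, e.g. the HMSPaths class added below:

    String[] elems = { "user", "hive", "warehouse", "db1.db", "tbl1" };
    if (authzPaths.isUnderPrefix(elems)) {
      // Exact lookup for the path itself, and the ancestor-fallback lookup.
      String exact = authzPaths.findAuthzObjectExactMatch(elems);
      String nearest = authzPaths.findAuthzObject(elems);
    }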

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java
new file mode 100644
index 0000000..2bd2a88
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPathsDumper.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import org.apache.sentry.hdfs.service.thrift.TPathsDump;
+
+public interface AuthzPathsDumper<K extends AuthzPaths> {
+
+  public TPathsDump createPathsDump();
+
+  public K initializeFromDump(TPathsDump pathsDump);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java
new file mode 100644
index 0000000..1631ae5
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/AuthzPermissions.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import org.apache.hadoop.fs.permission.AclEntry;
+
+import java.util.List;
+
+public interface AuthzPermissions {
+
+  public List<AclEntry> getAcls(String authzObj);
+
+}
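
The contract here is that an implementation maps an authorizable object to the
HDFS ACL entries that should be projected onto its paths. A hypothetical,
hard-coded stub for illustration only; the real implementation in this patch
derives the entries from Sentry privileges:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.sentry.hdfs.AuthzPermissions;

    // Hypothetical stub granting a fixed group read/execute on every object.
    public class StaticAuthzPermissions implements AuthzPermissions {
      @Override
      public List<AclEntry> getAcls(String authzObj) {
        return Arrays.asList(new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.GROUP)
            .setName("etl_group")
            .setPermission(FsAction.READ_EXECUTE)
            .build());
      }
    }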

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java
new file mode 100644
index 0000000..0e9fc2c
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPaths.java
@@ -0,0 +1,510 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.fs.Path;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * A non-thread-safe implementation of {@link AuthzPaths}. It abstracts over
+ * the core data structures used to efficiently handle requests from clients
+ * of the {@link AuthzPaths} APIs. All updates to this class are handled by
+ * the thread-safe {@link UpdateableAuthzPaths} class.
+ */
+public class HMSPaths implements AuthzPaths {
+
+  @VisibleForTesting
+  static List<String> getPathElements(String path) {
+    path = path.trim();
+    if (path.charAt(0) != Path.SEPARATOR_CHAR) {
+      throw new IllegalArgumentException("It must be an absolute path: " + 
+          path);
+    }
+    List<String> list = new ArrayList<String>(32);
+    int idx = 0;
+    int found = path.indexOf(Path.SEPARATOR_CHAR, idx);
+    while (found > -1) {
+      if (found > idx) {
+        list.add(path.substring(idx, found));
+      }
+      idx = found + 1;
+      found = path.indexOf(Path.SEPARATOR_CHAR, idx);
+    }
+    if (idx < path.length()) {
+      list.add(path.substring(idx));
+    }
+    return list;
+  }
+
+  @VisibleForTesting
+  static List<List<String>> getPathsElements(List<String> paths) {
+    List<List<String>> pathsElements = new ArrayList<List<String>>(paths.size());
+    for (String path : paths) {
+      pathsElements.add(getPathElements(path));
+    }
+    return pathsElements;
+  }
+
+  @VisibleForTesting
+  enum EntryType {
+    DIR(true),
+    PREFIX(false),
+    AUTHZ_OBJECT(false);
+
+    private boolean removeIfDangling;
+
+    private EntryType(boolean removeIfDangling) {
+      this.removeIfDangling = removeIfDangling;
+    }
+
+    public boolean isRemoveIfDangling() {
+      return removeIfDangling;
+    }
+
+    public byte getByte() {
+      return (byte)toString().charAt(0);
+    }
+    
+    public static EntryType fromByte(byte b) {
+      switch (b) {
+      case ((byte)'D'):
+        return DIR;
+      case ((byte)'P'):
+        return PREFIX;
+      case ((byte)'A'):
+        return AUTHZ_OBJECT;
+      default:
+        return null;
+      }
+    }
+  }
+
+  @VisibleForTesting
+  static class Entry {
+    private Entry parent;
+    private EntryType type;
+    private String pathElement;
+    private String authzObj;
+    private final Map<String, Entry> children;
+
+    Entry(Entry parent, String pathElement, EntryType type,
+        String authzObj) {
+      this.parent = parent;
+      this.type = type;
+      this.pathElement = pathElement;
+      this.authzObj = authzObj;
+      children = new HashMap<String, Entry>();
+    }
+
+    private void setAuthzObj(String authzObj) {
+      this.authzObj = authzObj;
+    }
+
+    private void setType(EntryType type) {
+      this.type = type;
+    }
+
+    protected void removeParent() {
+      parent = null;
+    }
+
+    public String toString() {
+      return String.format("Entry[fullPath: %s, type: %s, authObject: %s]",
+          getFullPath(), type, authzObj);
+    }
+
+    private Entry createChild(List<String> pathElements, EntryType type,
+        String authzObj) {
+      Entry entryParent = this;
+      for (int i = 0; i < pathElements.size() - 1; i++) {
+        String pathElement = pathElements.get(i);
+        Entry child = entryParent.getChildren().get(pathElement);
+        if (child == null) {
+          child = new Entry(entryParent, pathElement, EntryType.DIR, null);
+          entryParent.getChildren().put(pathElement, child);
+        }
+        entryParent = child;
+      }
+      String lastPathElement = pathElements.get(pathElements.size() - 1);
+      Entry child = entryParent.getChildren().get(lastPathElement);
+      if (child == null) {
+        child = new Entry(entryParent, lastPathElement, type, authzObj);
+        entryParent.getChildren().put(lastPathElement, child);
+      } else if (type == EntryType.AUTHZ_OBJECT &&
+          child.getType() == EntryType.DIR) {
+        // if the entry already existed as a dir, we change it to be an authz obj
+        child.setAuthzObj(authzObj);
+        child.setType(EntryType.AUTHZ_OBJECT);
+      }
+      return child;
+    }
+
+    public static Entry createRoot(boolean asPrefix) {
+      return new Entry(null, "/", (asPrefix) 
+                                   ? EntryType.PREFIX : EntryType.DIR, null);
+    }
+
+    private String toPath(List<String> arr) {
+      StringBuilder sb = new StringBuilder();
+      for (String s : arr) {
+        sb.append(Path.SEPARATOR).append(s);
+      }
+      return sb.toString();
+    }
+
+    public Entry createPrefix(List<String> pathElements) {
+      Entry prefix = findPrefixEntry(pathElements);
+      if (prefix != null) {
+        throw new IllegalArgumentException(String.format(
+            "Cannot add prefix '%s' under an existing prefix '%s'", 
+            toPath(pathElements), prefix.getFullPath()));
+      }
+      return createChild(pathElements, EntryType.PREFIX, null);
+    }
+
+    public Entry createAuthzObjPath(List<String> pathElements, String authzObj) {
+      Entry entry = null;
+      Entry prefix = findPrefixEntry(pathElements);
+      if (prefix != null) {
+        // we only create the entry if it is under a prefix, else we ignore it
+        entry = createChild(pathElements, EntryType.AUTHZ_OBJECT, authzObj);
+      }
+      return entry;
+    }
+
+    public void delete() {
+      if (getParent() != null) {
+        if (getChildren().isEmpty()) {
+          getParent().getChildren().remove(getPathElement());
+          getParent().deleteIfDangling();
+          parent = null;
+        } else {
+          // if the entry was for an authz object and has children, we
+          // change it to be a dir entry.
+          if (getType() == EntryType.AUTHZ_OBJECT) {
+            setType(EntryType.DIR);
+            setAuthzObj(null);
+          }
+        }
+      }
+    }
+
+    private void deleteIfDangling() {
+      if (getChildren().isEmpty() && getType().isRemoveIfDangling()) {
+        delete();
+      }
+    }
+
+    public Entry getParent() {
+      return parent;
+    }
+
+    public EntryType getType() {
+      return type;
+    }
+
+    public String getPathElement() {
+      return pathElement;
+    }
+
+    public String getAuthzObj() {
+      return authzObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    public Map<String, Entry> getChildren() {
+      return children;
+    }
+
+    public Entry findPrefixEntry(List<String> pathElements) {
+      Preconditions.checkArgument(pathElements != null,
+          "pathElements cannot be NULL");
+      return (getType() == EntryType.PREFIX) 
+             ? this : findPrefixEntry(pathElements, 0);
+    }
+
+    private Entry findPrefixEntry(List<String> pathElements, int index) {
+      Entry prefixEntry = null;
+      if (index == pathElements.size()) {
+        prefixEntry = null;
+      } else {
+        Entry child = getChildren().get(pathElements.get(index));
+        if (child != null) {
+          if (child.getType() == EntryType.PREFIX) {
+            prefixEntry = child;
+          } else {
+            prefixEntry = child.findPrefixEntry(pathElements, index + 1);
+          }
+        }
+      }
+      return prefixEntry;
+    }
+
+    public Entry find(String[] pathElements, boolean isPartialMatchOk) {
+      Preconditions.checkArgument(
+          pathElements != null && pathElements.length > 0,
+          "pathElements cannot be NULL or empty");
+      return find(pathElements, 0, isPartialMatchOk, null);
+    }
+
+    private Entry find(String[] pathElements, int index,
+        boolean isPartialMatchOk, Entry lastAuthObj) {
+      Entry found = null;
+      if (index == pathElements.length) {
+        if (isPartialMatchOk && (getType() == EntryType.AUTHZ_OBJECT)) {
+          found = this;
+        }
+      } else {
+        Entry child = getChildren().get(pathElements[index]);
+        if (child != null) {
+          if (index == pathElements.length - 1) {
+            found = (child.getType() == EntryType.AUTHZ_OBJECT) ? child : lastAuthObj;
+          } else {
+            found = child.find(pathElements, index + 1, isPartialMatchOk,
+                (child.getType() == EntryType.AUTHZ_OBJECT) ? child : lastAuthObj);
+          }
+        } else {
+          if (isPartialMatchOk) {
+            found = lastAuthObj;
+          }
+        }
+      }
+      return found;
+    }
+
+    public String getFullPath() {
+      String path = getFullPath(this, new StringBuilder()).toString();
+      if (path.isEmpty()) {
+        path = Path.SEPARATOR;
+      }
+      return path;
+    }
+
+    private StringBuilder getFullPath(Entry entry, StringBuilder sb) {
+      if (entry.getParent() != null) {
+        getFullPath(entry.getParent(), sb).append(Path.SEPARATOR).append(
+            entry.getPathElement());
+      }
+      return sb;
+    }
+
+  }
+
+  private volatile Entry root;
+  private String[] prefixes;
+  private Map<String, Set<Entry>> authzObjToPath;
+
+  public HMSPaths(String[] pathPrefixes) {
+    boolean rootPrefix = false;
+    this.prefixes = pathPrefixes;
+    for (String pathPrefix : pathPrefixes) {
+      rootPrefix = rootPrefix || pathPrefix.equals(Path.SEPARATOR);
+    }
+    if (rootPrefix && pathPrefixes.length > 1) {
+      throw new IllegalArgumentException(
+          "Root is a path prefix, there cannot be other path prefixes");
+    }
+    root = Entry.createRoot(rootPrefix);
+    if (!rootPrefix) {
+      for (String pathPrefix : pathPrefixes) {
+        root.createPrefix(getPathElements(pathPrefix));
+      }
+    }
+    authzObjToPath = new HashMap<String, Set<Entry>>();
+  }
+
+  void _addAuthzObject(String authzObj, List<String> authzObjPaths) {
+    addAuthzObject(authzObj, getPathsElements(authzObjPaths));
+  }
+
+  void addAuthzObject(String authzObj, List<List<String>> authzObjPathElements) {
+    Set<Entry> previousEntries = authzObjToPath.get(authzObj);
+    Set<Entry> newEntries = new HashSet<Entry>(authzObjPathElements.size());
+    for (List<String> pathElements : authzObjPathElements) {
+      Entry e = root.createAuthzObjPath(pathElements, authzObj);
+      if (e != null) {
+        newEntries.add(e);
+      } else {
+        // LOG WARN IGNORING PATH, no prefix
+      }
+    }
+    authzObjToPath.put(authzObj, newEntries);
+    if (previousEntries != null) {
+      previousEntries.removeAll(newEntries);
+      if (!previousEntries.isEmpty()) {
+        for (Entry entry : previousEntries) {
+          entry.delete();
+        }
+      }
+    }
+  }
+
+  void addPathsToAuthzObject(String authzObj,
+      List<List<String>> authzObjPathElements, boolean createNew) {
+    Set<Entry> entries = authzObjToPath.get(authzObj);
+    if (entries != null) {
+      Set<Entry> newEntries = new HashSet<Entry>(authzObjPathElements.size());
+      for (List<String> pathElements : authzObjPathElements) {
+        Entry e = root.createAuthzObjPath(pathElements, authzObj);
+        if (e != null) {
+          newEntries.add(e);
+        } else {
+          // LOG WARN IGNORING PATH, no prefix
+        }
+      }
+      entries.addAll(newEntries);
+    } else {
+      if (createNew) {
+        addAuthzObject(authzObj, authzObjPathElements);
+      }
+      // LOG WARN object does not exist
+    }
+  }
+
+  void _addPathsToAuthzObject(String authzObj, List<String> authzObjPaths) {
+    addPathsToAuthzObject(authzObj, getPathsElements(authzObjPaths), false);
+  }
+
+  void addPathsToAuthzObject(String authzObj, List<List<String>> authzObjPaths) {
+    addPathsToAuthzObject(authzObj, authzObjPaths, false);
+  }
+
+  void deletePathsFromAuthzObject(String authzObj,
+      List<List<String>> authzObjPathElements) {
+    Set<Entry> entries = authzObjToPath.get(authzObj);
+    if (entries != null) {
+      Set<Entry> toDelEntries = new HashSet<Entry>(authzObjPathElements.size());
+      for (List<String> pathElements : authzObjPathElements) {
+        Entry entry = root.find(
+            pathElements.toArray(new String[pathElements.size()]), false);
+        if (entry != null) {
+          entry.delete();
+          toDelEntries.add(entry);
+        } else {
+          // LOG WARN IGNORING PATH, it was not registered
+        }
+      }
+      entries.removeAll(toDelEntries);
+    } else {
+      // LOG WARN object does not exist
+    }
+  }
+
+  void deleteAuthzObject(String authzObj) {
+    Set<Entry> entries = authzObjToPath.remove(authzObj);
+    if (entries != null) {
+      for (Entry entry : entries) {
+        entry.delete();
+      }
+    }
+  }
+
+  @Override
+  public String findAuthzObject(String[] pathElements) {
+    return findAuthzObject(pathElements, true);
+  }
+
+  @Override
+  public String findAuthzObjectExactMatch(String[] pathElements) {
+    return findAuthzObject(pathElements, false);
+  }
+
+  public String findAuthzObject(String[] pathElements, boolean isPartialOk) {
+    // Handle '/'
+    if ((pathElements == null)||(pathElements.length == 0)) return null;
+    String authzObj = null;
+    Entry entry = root.find(pathElements, isPartialOk);
+    if (entry != null) {
+      authzObj = entry.getAuthzObj();
+    }
+    return authzObj;
+  }
+
+  boolean renameAuthzObject(String oldName, List<String> oldPathElems,
+      String newName, List<String> newPathElems) {
+    // Handle '/'
+    if ((oldPathElems == null)||(oldPathElems.size() == 0)) return false;
+    Entry entry =
+        root.find(oldPathElems.toArray(new String[oldPathElems.size()]), false);
+    if ((entry != null)&&(entry.getAuthzObj().equals(oldName))) {
+      // Update pathElements
+      String[] newPath = newPathElems.toArray(new String[newPathElems.size()]);
+      // Can't use Lists.newArrayList() because of whacky generics
+      List<List<String>> pathElemsAsList = new LinkedList<List<String>>();
+      pathElemsAsList.add(oldPathElems);
+      deletePathsFromAuthzObject(oldName, pathElemsAsList);
+      if (isUnderPrefix(newPath)) {
+        // Can't use Lists.newArrayList() because of whacky generics
+        pathElemsAsList = new LinkedList<List<String>>();
+        pathElemsAsList.add(newPathElems);
+        addPathsToAuthzObject(oldName, pathElemsAsList);
+      }
+      // This would be true only for table rename
+      if (!oldName.equals(newName)) {
+        Set<Entry> eSet = authzObjToPath.get(oldName);
+        authzObjToPath.put(newName, eSet);
+        for (Entry e : eSet) {
+          if (e.getAuthzObj().equals(oldName)) {
+            e.setAuthzObj(newName);
+          }
+        }
+        authzObjToPath.remove(oldName);
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public boolean isUnderPrefix(String[] pathElements) {
+    return root.findPrefixEntry(Lists.newArrayList(pathElements)) != null;
+  }
+
+  // Used by the serializer
+  String[] getPrefixes() {
+    return prefixes;
+  }
+
+  Entry getRootEntry() {
+    return root;
+  }
+
+  void setRootEntry(Entry root) {
+    this.root = root;
+  }
+
+  void setAuthzObjToPathMapping(Map<String, Set<Entry>> mapping) {
+    authzObjToPath = mapping;
+  }
+
+  @Override
+  public HMSPathsDumper getPathsDump() {
+    return new HMSPathsDumper(this);
+  }
+
+}
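
To make the Entry-tree semantics above concrete, a small lifecycle sketch. The
mutators are package-private, so this assumes code in the same package, and
all object and path names are hypothetical:

    HMSPaths paths = new HMSPaths(new String[] { "/user/hive/warehouse" });
    paths._addAuthzObject("db1", Arrays.asList("/user/hive/warehouse/db1.db"));
    paths._addAuthzObject("db1.tbl1",
        Arrays.asList("/user/hive/warehouse/db1.db/tbl1"));

    String[] tblPath = { "user", "hive", "warehouse", "db1.db", "tbl1" };
    paths.findAuthzObjectExactMatch(tblPath);  // "db1.tbl1"

    // Dropping the table unlinks its entries; the partial-match API then
    // falls back to the closest registered ancestor, the database.
    paths.deleteAuthzObject("db1.tbl1");
    paths.findAuthzObject(tblPath);            // "db1"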

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java
new file mode 100644
index 0000000..1537c1e
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/HMSPathsDumper.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.sentry.hdfs.HMSPaths.Entry;
+import org.apache.sentry.hdfs.HMSPaths.EntryType;
+import org.apache.sentry.hdfs.service.thrift.TPathEntry;
+import org.apache.sentry.hdfs.service.thrift.TPathsDump;
+
+public class HMSPathsDumper implements AuthzPathsDumper<HMSPaths> {
+
+  private final HMSPaths hmsPaths;
+
+  static class Tuple {
+    final TPathEntry entry;
+    final int id;
+    Tuple(TPathEntry entry, int id) {
+      this.entry = entry;
+      this.id = id;
+    }
+  }
+
+  public HMSPathsDumper(HMSPaths hmsPaths) {
+    this.hmsPaths = hmsPaths;
+  }
+
+  @Override
+  public TPathsDump createPathsDump() {
+    AtomicInteger counter = new AtomicInteger(0);
+    Map<Integer, TPathEntry> idMap = new HashMap<Integer, TPathEntry>();
+    Tuple tRootTuple =
+        createTPathEntry(hmsPaths.getRootEntry(), counter, idMap);
+    idMap.put(tRootTuple.id, tRootTuple.entry);
+    cloneToTPathEntry(hmsPaths.getRootEntry(), tRootTuple.entry, counter, idMap);
+    return new TPathsDump(tRootTuple.id, idMap);
+  }
+
+  private void cloneToTPathEntry(Entry parent, TPathEntry tParent,
+      AtomicInteger counter, Map<Integer, TPathEntry> idMap) {
+    for (Entry child : parent.getChildren().values()) {
+      Tuple childTuple = createTPathEntry(child, counter, idMap);
+      tParent.getChildren().add(childTuple.id);
+      cloneToTPathEntry(child, childTuple.entry, counter, idMap);
+    }
+  }
+
+  private Tuple createTPathEntry(Entry entry, AtomicInteger idCounter,
+      Map<Integer, TPathEntry> idMap) {
+    int myId = idCounter.incrementAndGet();
+    TPathEntry tEntry = new TPathEntry(entry.getType().getByte(),
+        entry.getPathElement(), new HashSet<Integer>());
+    if (entry.getAuthzObj() != null) {
+      tEntry.setAuthzObj(entry.getAuthzObj());
+    }
+    idMap.put(myId, tEntry);
+    return new Tuple(tEntry, myId);
+  }
+
+  @Override
+  public HMSPaths initializeFromDump(TPathsDump pathDump) {
+    HMSPaths hmsPaths = new HMSPaths(this.hmsPaths.getPrefixes());
+    TPathEntry tRootEntry = pathDump.getNodeMap().get(pathDump.getRootId());
+    Entry rootEntry = hmsPaths.getRootEntry();
+//    Entry rootEntry = new Entry(null, tRootEntry.getPathElement(),
+//        EntryType.fromByte(tRootEntry.getType()), tRootEntry.getAuthzObj());
+    Map<String, Set<Entry>> authzObjToPath = new HashMap<String, Set<Entry>>();
+    cloneToEntry(tRootEntry, rootEntry, pathDump.getNodeMap(), authzObjToPath,
+        rootEntry.getType() == EntryType.PREFIX);
+    hmsPaths.setRootEntry(rootEntry);
+    hmsPaths.setAuthzObjToPathMapping(authzObjToPath);
+    return hmsPaths;
+  }
+
+  private void cloneToEntry(TPathEntry tParent, Entry parent,
+      Map<Integer, TPathEntry> idMap, Map<String,
+      Set<Entry>> authzObjToPath, boolean hasCrossedPrefix) {
+    for (Integer id : tParent.getChildren()) {
+      TPathEntry tChild = idMap.get(id);
+      Entry child = null;
+      boolean isChildPrefix = hasCrossedPrefix;
+      if (!hasCrossedPrefix) {
+        child = parent.getChildren().get(tChild.getPathElement());
+        // If we haven't reached a prefix entry yet, then the child should
+        // already exist; otherwise it is not part of the prefix
+        if (child == null) continue;
+        isChildPrefix = child.getType() == EntryType.PREFIX;
+      }
+      if (child == null) {
+        child = new Entry(parent, tChild.getPathElement(),
+            EntryType.fromByte(tChild.getType()), tChild.getAuthzObj());
+      }
+      if (child.getAuthzObj() != null) {
+        Set<Entry> paths = authzObjToPath.get(child.getAuthzObj());
+        if (paths == null) {
+          paths = new HashSet<Entry>();
+          authzObjToPath.put(child.getAuthzObj(), paths);
+        }
+        paths.add(child);
+      }
+      parent.getChildren().put(child.getPathElement(), child);
+      cloneToEntry(tChild, child, idMap, authzObjToPath, isChildPrefix);
+    }
+  }
+
+}
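
The intended round trip for the dumper, sketched with a hypothetical prefix.
Note that initializeFromDump() seeds the new instance with this dumper's
prefixes and grafts the dumped tree onto them:

    HMSPaths original = new HMSPaths(new String[] { "/user/hive/warehouse" });
    // ... populate 'original' from the Metastore ...

    // Flatten the Entry tree into the id-keyed thrift representation.
    TPathsDump dump = original.getPathsDump().createPathsDump();

    // On the receiving side, rebuild an equivalent HMSPaths from the dump.
    HMSPaths copy = original.getPathsDump().initializeFromDump(dump);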

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java
new file mode 100644
index 0000000..3ecff94
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/MetastoreClient.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+/**
+ * Interface to abstract all interactions between Sentry and the Hive
+ * Metastore.
+ */
+public interface MetastoreClient {
+
+  public List<Database> getAllDatabases();
+
+  public List<Table> getAllTablesOfDatabase(Database db);
+
+  public List<Partition> listAllPartitions(Database db, Table tbl);
+
+}
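
This contract is presumably what lets the plugin walk databases, then tables,
then partitions when building its initial full path image. A trivial,
hypothetical in-memory stub showing the shape of an implementation:

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.sentry.hdfs.MetastoreClient;

    // Hypothetical stub: a Metastore with no databases at all.
    public class EmptyMetastoreClient implements MetastoreClient {
      @Override
      public List<Database> getAllDatabases() {
        return Collections.emptyList();
      }
      @Override
      public List<Table> getAllTablesOfDatabase(Database db) {
        return Collections.emptyList();
      }
      @Override
      public List<Partition> listAllPartitions(Database db, Table tbl) {
        return Collections.emptyList();
      }
    }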

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
new file mode 100644
index 0000000..60f8629
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
+
+import com.google.common.collect.Lists;
+
+/**
+ * A wrapper class over the Thrift-generated TPathsUpdate class. Please see
+ * {@link Updateable.Update} for more information.
+ */
+public class PathsUpdate implements Updateable.Update {
+  
+  public static final String ALL_PATHS = "__ALL_PATHS__";
+
+  private final TPathsUpdate tPathsUpdate;
+
+  public PathsUpdate(TPathsUpdate tPathsUpdate) {
+    this.tPathsUpdate = tPathsUpdate;
+  }
+
+  public PathsUpdate(long seqNum, boolean hasFullImage) {
+    tPathsUpdate = new TPathsUpdate(hasFullImage, seqNum,
+        new LinkedList<TPathChanges>());
+  }
+
+  @Override
+  public boolean hasFullImage() {
+    return tPathsUpdate.isHasFullImage();
+  }
+
+  public TPathChanges newPathChange(String authzObject) {
+    TPathChanges pathChanges = new TPathChanges(authzObject,
+        new LinkedList<List<String>>(), new LinkedList<List<String>>());
+    tPathsUpdate.addToPathChanges(pathChanges);
+    return pathChanges;
+  }
+
+  public List<TPathChanges> getPathChanges() {
+    return tPathsUpdate.getPathChanges();
+  }
+
+  @Override
+  public long getSeqNum() {
+    return tPathsUpdate.getSeqNum();
+  }
+
+  @Override
+  public void setSeqNum(long seqNum) {
+    tPathsUpdate.setSeqNum(seqNum);
+  }
+
+  public TPathsUpdate toThrift() {
+    return tPathsUpdate;
+  }
+
+  public static List<String> cleanPath(String path) {
+    try {
+      return Lists.newArrayList(new URI(path).getPath().split("^/")[1]
+          .split("/"));
+    } catch (URISyntaxException e) {
+      throw new RuntimeException("Incomprehensible path [" + path + "]");
+    }
+  }
+
+}
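
How a caller assembles a path delta, sketched. The authz object and URI are
hypothetical; cleanPath() is what reduces a full URI to its path segments, and
addToAddPaths() is assumed to be the generated list accessor on TPathChanges
under the usual Thrift naming:

    // A partial update (not a full image) carrying one table's path changes.
    PathsUpdate update = new PathsUpdate(42L, false);
    TPathChanges changes = update.newPathChange("db1.tbl1");
    changes.addToAddPaths(PathsUpdate.cleanPath(
        "hdfs://namenode:8020/user/hive/warehouse/db1.db/tbl1"));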

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PermissionsUpdate.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PermissionsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PermissionsUpdate.java
new file mode 100644
index 0000000..1130140
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PermissionsUpdate.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPrivilegeChanges;
+import org.apache.sentry.hdfs.service.thrift.TRoleChanges;
+
+public class PermissionsUpdate implements Updateable.Update {
+
+  public static final String RENAME_PRIVS = "__RENAME_PRIV__";
+  public static final String ALL_AUTHZ_OBJ = "__ALL_AUTHZ_OBJ__";
+  public static final String ALL_PRIVS = "__ALL_PRIVS__";
+  public static final String ALL_ROLES = "__ALL_ROLES__";
+  public static final String ALL_GROUPS = "__ALL_GROUPS__";
+
+  private final TPermissionsUpdate tPermUpdate;
+
+  public PermissionsUpdate(TPermissionsUpdate tPermUpdate) {
+    this.tPermUpdate = tPermUpdate;
+  }
+
+  public PermissionsUpdate(long seqNum, boolean hasFullImage) {
+    this.tPermUpdate = new TPermissionsUpdate(hasFullImage, seqNum,
+        new HashMap<String, TPrivilegeChanges>(),
+        new HashMap<String, TRoleChanges>());
+  }
+
+  @Override
+  public long getSeqNum() {
+    return tPermUpdate.getSeqNum();
+  }
+
+  @Override
+  public void setSeqNum(long seqNum) {
+    tPermUpdate.setSeqNum(seqNum);
+  }
+
+  @Override
+  public boolean hasFullImage() {
+    return tPermUpdate.isHasfullImage();
+  }
+
+  public TPrivilegeChanges addPrivilegeUpdate(String authzObj) {
+    if (tPermUpdate.getPrivilegeChanges().containsKey(authzObj)) {
+      return tPermUpdate.getPrivilegeChanges().get(authzObj);
+    }
+    TPrivilegeChanges privUpdate = new TPrivilegeChanges(authzObj,
+        new HashMap<String, String>(), new HashMap<String, String>());
+    tPermUpdate.getPrivilegeChanges().put(authzObj, privUpdate);
+    return privUpdate;
+  }
+
+  public TRoleChanges addRoleUpdate(String role) {
+    if (tPermUpdate.getRoleChanges().containsKey(role)) {
+      return tPermUpdate.getRoleChanges().get(role);
+    }
+    TRoleChanges roleUpdate = new TRoleChanges(role, new LinkedList<String>(),
+        new LinkedList<String>());
+    tPermUpdate.getRoleChanges().put(role, roleUpdate);
+    return roleUpdate;
+  }
+
+  public Collection<TRoleChanges> getRoleUpdates() {
+    return tPermUpdate.getRoleChanges().values();
+  }
+
+  public Collection<TPrivilegeChanges> getPrivilegeUpdates() {
+    return tPermUpdate.getPrivilegeChanges().values();
+  }
+
+  public TPermissionsUpdate toThrift() {
+    return tPermUpdate;
+  }
+}
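
For context, a minimal sketch of how a grant might be recorded with this
wrapper; the thrift-generated putToAddPrivileges and addToAddGroups setters,
and the sample role/group names, are assumptions, not code from this commit:

    // A partial update (no full image) at sequence number 7.
    PermissionsUpdate update = new PermissionsUpdate(7, false);
    // Grant SELECT on db1.tbl1 to role1, and add group1 to role1.
    update.addPrivilegeUpdate("db1.tbl1").putToAddPrivileges("role1", "SELECT");
    update.addRoleUpdate("role1").addToAddGroups("group1");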


[9/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

This change adds support for synchronizing HDFS permissions with the permissions stored
in Sentry. This makes it easy to share data across components (Hive/Impala, MR, Spark,
etc.) while managing all privileges in one centralized location, Sentry. This is done
using new plugins for the HMS, HDFS, and Sentry. The HMS plugin pushes table/partition
path information to the Sentry Service; the Sentry Service forwards that path information,
along with all privilege updates, to the HDFS NameNode plugin, which caches the
information and updates the ACLs accordingly.

The mapping of Sentry privileges to HDFS file permissions is:
ALL -> Read/Write access to data files
SELECT -> Read access to data files
INSERT -> Write access to data files
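
As an illustrative sketch (not part of this commit), the mapping could be
expressed with Hadoop's FsAction enum; the helper class and method names are
assumptions for the example:

    import org.apache.hadoop.fs.permission.FsAction;

    final class SentryPrivilegeMapping {
      private SentryPrivilegeMapping() {}

      // Maps a Sentry privilege name to the HDFS action granted on data files.
      static FsAction toFsAction(String sentryPrivilege) {
        if ("ALL".equalsIgnoreCase(sentryPrivilege)) {
          return FsAction.READ_WRITE;   // read + write
        } else if ("SELECT".equalsIgnoreCase(sentryPrivilege)) {
          return FsAction.READ;         // read only
        } else if ("INSERT".equalsIgnoreCase(sentryPrivilege)) {
          return FsAction.WRITE;        // write only
        }
        return FsAction.NONE;           // unmapped privileges grant nothing
      }
    }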


Project: http://git-wip-us.apache.org/repos/asf/incubator-sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-sentry/commit/2e509e4b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-sentry/tree/2e509e4b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-sentry/diff/2e509e4b

Branch: refs/heads/master
Commit: 2e509e4bc4f06e1c207d5702aad55b290ef390a4
Parents: 49e6086
Author: Lenni Kuff <ls...@cloudera.com>
Authored: Wed Nov 12 22:58:53 2014 -0800
Committer: Lenni Kuff <ls...@cloudera.com>
Committed: Wed Nov 12 23:32:32 2014 -0800

----------------------------------------------------------------------
 bin/sentry                                      |    3 +
 pom.xml                                         |   61 +-
 .../SentryHiveAuthorizationTaskFactoryImpl.java |   14 +
 .../sentry/binding/hive/conf/HiveAuthzConf.java |    4 +-
 .../SentryMetastorePostEventListener.java       |  159 +-
 sentry-dist/pom.xml                             |    1 +
 sentry-dist/src/main/assembly/bin.xml           |   24 +-
 sentry-hdfs/pom.xml                             |   38 +
 sentry-hdfs/sentry-hdfs-common/.gitignore       |   18 +
 sentry-hdfs/sentry-hdfs-common/pom.xml          |  148 +
 .../hdfs/service/thrift/SentryHDFSService.java  | 3483 ++++++++++++++++++
 .../service/thrift/TAuthzUpdateResponse.java    |  603 +++
 .../hdfs/service/thrift/TPathChanges.java       |  765 ++++
 .../sentry/hdfs/service/thrift/TPathEntry.java  |  747 ++++
 .../sentry/hdfs/service/thrift/TPathsDump.java  |  549 +++
 .../hdfs/service/thrift/TPathsUpdate.java       |  748 ++++
 .../hdfs/service/thrift/TPermissionsUpdate.java |  810 ++++
 .../hdfs/service/thrift/TPrivilegeChanges.java  |  713 ++++
 .../hdfs/service/thrift/TRoleChanges.java       |  691 ++++
 .../java/org/apache/sentry/hdfs/AuthzPaths.java |   58 +
 .../apache/sentry/hdfs/AuthzPathsDumper.java    |   28 +
 .../apache/sentry/hdfs/AuthzPermissions.java    |   28 +
 .../java/org/apache/sentry/hdfs/HMSPaths.java   |  510 +++
 .../org/apache/sentry/hdfs/HMSPathsDumper.java  |  126 +
 .../org/apache/sentry/hdfs/MetastoreClient.java |   38 +
 .../org/apache/sentry/hdfs/PathsUpdate.java     |   88 +
 .../apache/sentry/hdfs/PermissionsUpdate.java   |   94 +
 .../sentry/hdfs/SentryHDFSServiceClient.java    |  229 ++
 .../apache/sentry/hdfs/ServiceConstants.java    |   70 +
 .../java/org/apache/sentry/hdfs/Updateable.java |   67 +
 .../sentry/hdfs/UpdateableAuthzPaths.java       |  153 +
 .../main/resources/sentry_hdfs_service.thrift   |   87 +
 .../org/apache/sentry/hdfs/TestHMSPaths.java    |  357 ++
 .../sentry/hdfs/TestHMSPathsFullDump.java       |  112 +
 .../sentry/hdfs/TestUpdateableAuthzPaths.java   |  156 +
 .../src/test/resources/hdfs-sentry.xml          |   22 +
 sentry-hdfs/sentry-hdfs-dist/pom.xml            |   79 +
 .../sentry-hdfs-namenode-plugin/.gitignore      |   18 +
 sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml |   63 +
 .../hdfs/SentryAuthorizationConstants.java      |   55 +
 .../sentry/hdfs/SentryAuthorizationInfo.java    |  237 ++
 .../hdfs/SentryAuthorizationProvider.java       |  372 ++
 .../apache/sentry/hdfs/SentryPermissions.java   |  220 ++
 .../org/apache/sentry/hdfs/SentryUpdater.java   |   61 +
 .../sentry/hdfs/UpdateableAuthzPermissions.java |  230 ++
 .../hdfs/MockSentryAuthorizationProvider.java   |   26 +
 .../sentry/hdfs/SentryAuthorizationInfoX.java   |   85 +
 .../hdfs/TestSentryAuthorizationProvider.java   |  164 +
 .../src/test/resources/hdfs-sentry.xml          |   33 +
 sentry-hdfs/sentry-hdfs-service/.gitignore      |   18 +
 sentry-hdfs/sentry-hdfs-service/pom.xml         |  104 +
 .../sentry/hdfs/ExtendedMetastoreClient.java    |  108 +
 .../org/apache/sentry/hdfs/MetastorePlugin.java |  257 ++
 .../sentry/hdfs/SentryHDFSServiceProcessor.java |  118 +
 .../hdfs/SentryHDFSServiceProcessorFactory.java |  108 +
 .../org/apache/sentry/hdfs/SentryPlugin.java    |  247 ++
 .../org/apache/sentry/hdfs/UpdateForwarder.java |  292 ++
 .../sentry/hdfs/UpdateablePermissions.java      |   62 +
 .../apache/sentry/hdfs/TestUpdateForwarder.java |  307 ++
 sentry-provider/sentry-provider-db/pom.xml      |   78 +-
 .../db/SentryMetastoreListenerPlugin.java       |   48 +
 .../provider/db/SentryPolicyStorePlugin.java    |   60 +
 .../provider/db/SimpleDBProviderBackend.java    |   40 +-
 .../db/service/persistent/SentryStore.java      |   81 +-
 .../thrift/SentryPolicyStoreProcessor.java      |   72 +-
 .../sentry/service/thrift/SentryService.java    |    1 +
 .../sentry/service/thrift/ServiceConstants.java |    7 +
 .../thrift/TestSentryPolicyStoreProcessor.java  |    1 +
 .../thrift/TestSentryServerWithoutKerberos.java |    4 +-
 sentry-tests/sentry-tests-hive/pom.xml          |   17 +
 .../tests/e2e/hdfs/TestHDFSIntegration.java     |  787 ++++
 .../sentry/tests/e2e/hive/StaticUserGroup.java  |    2 +
 ...actMetastoreTestWithStaticConfiguration.java |    2 +
 .../e2e/metastore/TestMetastoreEndToEnd.java    |   49 +-
 74 files changed, 16202 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/bin/sentry
----------------------------------------------------------------------
diff --git a/bin/sentry b/bin/sentry
index 0b98049..93809ea 100755
--- a/bin/sentry
+++ b/bin/sentry
@@ -72,6 +72,9 @@ then
   for f in ${SENTRY_HOME}/lib/server/*.jar; do
     HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f}
   done
+  for f in ${SENTRY_HOME}/lib/plugins/*.jar; do
+    HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f}
+  done
   exec $HADOOP jar ${SENTRY_HOME}/lib/${_CMD_JAR} org.apache.sentry.SentryMain ${args[@]}
 else
   exec ${SENTRY_HOME}/bin/config_tool ${args[@]}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8c59ba9..b0cdd9a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -68,7 +68,7 @@ limitations under the License.
     <derby.version>10.10.2.0</derby.version>
     <commons-cli.version>1.2</commons-cli.version>
     <hive.version>0.13.1-cdh5.3.0-SNAPSHOT</hive.version>
-    <hadoop.version>2.3.0-cdh5.1.0-SNAPSHOT</hadoop.version>
+    <hadoop.version>2.5.0-cdh5.2.0-SNAPSHOT</hadoop.version>
     <fest.reflect.version>1.4.1</fest.reflect.version>
     <guava.version>11.0.2</guava.version>
     <junit.version>4.9</junit.version>
@@ -151,6 +151,12 @@ limitations under the License.
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs</artifactId>
+        <version>${hadoop.version}</version>
+        <type>test-jar</type>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minicluster</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
@@ -336,6 +342,26 @@ limitations under the License.
       </dependency>
       <dependency>
         <groupId>org.apache.sentry</groupId>
+        <artifactId>sentry-hdfs-common</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.sentry</groupId>
+        <artifactId>sentry-hdfs-service</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.sentry</groupId>
+        <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.sentry</groupId>
+        <artifactId>sentry-hdfs-dist</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.sentry</groupId>
         <artifactId>sentry-provider-cache</artifactId>
         <version>${project.version}</version>
       </dependency>
@@ -403,6 +429,7 @@ limitations under the License.
     <module>sentry-provider</module>
     <module>sentry-policy</module>
     <module>sentry-tests</module>
+    <module>sentry-hdfs</module>
     <module>sentry-dist</module>
   </modules>
 
@@ -427,8 +454,38 @@ limitations under the License.
           <downloadSources>true</downloadSources>
           <workspaceActiveCodeStyleProfileName>GoogleStyle</workspaceActiveCodeStyleProfileName>
           <workspaceCodeStylesURL>https://google-styleguide.googlecode.com/svn/trunk/eclipse-java-google-style.xml</workspaceCodeStylesURL>
+          <sourceIncludes><include>src/gen/thrift/gen-javabean/**</include></sourceIncludes>
         </configuration>
       </plugin>
+
+<!--
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>2.1</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <artifactSet>
+                <includes>
+                  <include>org.apache.thrift:libthrift</include>
+                </includes>
+              </artifactSet>
+              <relocations>
+                <relocation>
+                  <pattern>org.apache.thrift</pattern>
+                  <shadedPattern>sentry.org.apache.thrift</shadedPattern>
+                </relocation>
+              </relocations>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+-->
     </plugins>
 
     <pluginManagement>
@@ -515,10 +572,12 @@ limitations under the License.
                   <exclude>**/.metadata/</exclude>
                   <!-- Maven working directory -->
                   <exclude>**/target/</exclude>
+                  <exclude>**/assembly/</exclude>
                   <!-- Pre commit testing generated files -->
                   <exclude>maven-repo/</exclude>
                   <exclude>test-output/</exclude>
                   <!-- Derby files which are created after test run -->
+                  <exclude>**/dependency-reduced-pom.xml</exclude>
                   <exclude>**/derby.log</exclude>
                   <exclude>**/service.properties</exclude>
                   <exclude>**/*.lck</exclude>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java
index f38ee91..39a22c6 100644
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/SentryHiveAuthorizationTaskFactoryImpl.java
@@ -50,11 +50,14 @@ import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hadoop.hive.ql.security.authorization.PrivilegeRegistry;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.sentry.core.model.db.AccessConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
 public class SentryHiveAuthorizationTaskFactoryImpl implements HiveAuthorizationTaskFactory {
 
+  private static final Logger LOG = LoggerFactory.getLogger(SentryHiveAuthorizationTaskFactoryImpl.class);
 
   public SentryHiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) {
 
@@ -238,6 +241,7 @@ public class SentryHiveAuthorizationTaskFactoryImpl implements HiveAuthorization
       HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) throws SemanticException {
     List<PrincipalDesc> principalDesc = analyzePrincipalListDef(
         (ASTNode) ast.getChild(0));
+
     List<String> roles = new ArrayList<String>();
     for (int i = 1; i < ast.getChildCount(); i++) {
       roles.add(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(i).getText()));
@@ -314,18 +318,28 @@ public class SentryHiveAuthorizationTaskFactoryImpl implements HiveAuthorization
       ASTNode child = (ASTNode) node.getChild(i);
       PrincipalType type = null;
       switch (child.getType()) {
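+      // The numeric ids below appear to mirror the symbolic
+      // TOK_USER/TOK_GROUP/TOK_ROLE constants for a HiveParser build whose
+      // generated token ids differ (assumption; the ids are not verified here).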
+      case 880:
+        type = PrincipalType.USER;
+        break;
       case HiveParser.TOK_USER:
         type = PrincipalType.USER;
         break;
+      case 685:
+        type = PrincipalType.GROUP;
+        break;
       case HiveParser.TOK_GROUP:
         type = PrincipalType.GROUP;
         break;
+      case 782:
+        type = PrincipalType.ROLE;
+        break;
       case HiveParser.TOK_ROLE:
         type = PrincipalType.ROLE;
         break;
       }
       String principalName = BaseSemanticAnalyzer.unescapeIdentifier(child.getChild(0).getText());
       PrincipalDesc principalDesc = new PrincipalDesc(principalName, type);
+      LOG.debug("## Principal : [ " + principalName + ", " + type + "]");
       principalList.add(principalDesc);
     }
     return principalList;

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
index 4d2a625..93f19f3 100644
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
@@ -18,7 +18,7 @@ package org.apache.sentry.binding.hive.conf;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.mortbay.log.Log;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -203,7 +203,7 @@ public class HiveAuthzConf extends Configuration {
       if (retVal == null) {
         retVal = AuthzConfVars.getDefault(varName);
       } else {
-        Log.warn("Using the deprecated config setting " + currentToDeprecatedProps.get(varName).getVar() +
+        LOG.warn("Using the deprecated config setting " + currentToDeprecatedProps.get(varName).getVar() +
             " instead of " + varName);
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java
index 38bf8b2..3760fe9 100644
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java
@@ -25,10 +25,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.sentry.SentryUserException;
@@ -38,24 +42,60 @@ import org.apache.sentry.core.common.Authorizable;
 import org.apache.sentry.core.model.db.Database;
 import org.apache.sentry.core.model.db.Server;
 import org.apache.sentry.core.model.db.Table;
+import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin;
 import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
 import org.apache.sentry.service.thrift.SentryServiceClientFactory;
+import org.apache.sentry.service.thrift.ServiceConstants.ConfUtilties;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SentryMetastorePostEventListener extends MetaStoreEventListener {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryMetastorePostEventListener.class);
   private final SentryServiceClientFactory sentryClientFactory;
   private final HiveAuthzConf authzConf;
   private final Server server;
 
+  private final List<SentryMetastoreListenerPlugin> sentryPlugins = new ArrayList<SentryMetastoreListenerPlugin>();
+
   public SentryMetastorePostEventListener(Configuration config) {
     super(config);
     sentryClientFactory = new SentryServiceClientFactory();
 
-    authzConf = HiveAuthzConf.getAuthzConf(new HiveConf());
+    authzConf = HiveAuthzConf.getAuthzConf((HiveConf)config);
     server = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar()));
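+    // Reflectively instantiate every configured metastore plugin; each class
+    // must extend SentryMetastoreListenerPlugin and expose a constructor
+    // taking a Configuration.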
+    Iterable<String> pluginClasses = ConfUtilties.CLASS_SPLITTER
+        .split(config.get(ServerConfig.SENTRY_METASTORE_PLUGINS,
+            ServerConfig.SENTRY_METASTORE_PLUGINS_DEFAULT).trim());
+    try {
+      for (String pluginClassStr : pluginClasses) {
+        Class<?> clazz = config.getClassByName(pluginClassStr);
+        if (!SentryMetastoreListenerPlugin.class.isAssignableFrom(clazz)) {
+          throw new IllegalArgumentException("Class ["
+              + pluginClassStr + "] is not a "
+              + SentryMetastoreListenerPlugin.class.getName());
+        }
+        SentryMetastoreListenerPlugin plugin = (SentryMetastoreListenerPlugin) clazz
+            .getConstructor(Configuration.class).newInstance(config);
+        sentryPlugins.add(plugin);
+      }
+    } catch (Exception e) {
+      LOGGER.error("Could not initialize Plugin !!", e);
+      throw new RuntimeException(e);
+    }
   }
 
   @Override
   public void onCreateTable (CreateTableEvent tableEvent) throws MetaException {
+    if (tableEvent.getTable().getSd().getLocation() != null) {
+      String authzObj = tableEvent.getTable().getDbName() + "."
+          + tableEvent.getTable().getTableName();
+      String path = tableEvent.getTable().getSd().getLocation();
+      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+        plugin.addPath(authzObj, path);
+      }
+    }
     // drop the privileges on the given table, in case anything was left
     // behind during the drop
     if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) {
@@ -71,6 +111,13 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
 
   @Override
   public void onDropTable(DropTableEvent tableEvent) throws MetaException {
+    if (tableEvent.getTable().getSd().getLocation() != null) {
+      String authzObj = tableEvent.getTable().getDbName() + "."
+          + tableEvent.getTable().getTableName();
+      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+        plugin.removeAllPaths(authzObj, null);
+      }
+    }
     // drop the privileges on the given table
     if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) {
       return;
@@ -86,6 +133,13 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
   @Override
   public void onCreateDatabase(CreateDatabaseEvent dbEvent)
       throws MetaException {
+    if (dbEvent.getDatabase().getLocationUri() != null) {
+      String authzObj = dbEvent.getDatabase().getName();
+      String path = dbEvent.getDatabase().getLocationUri();
+      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+        plugin.addPath(authzObj, path);
+      }
+    }
     // drop the privileges on the database, in case anything was left behind
     // during the last drop db
     if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) {
@@ -105,6 +159,12 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
    */
   @Override
   public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
+    String authzObj = dbEvent.getDatabase().getName();
+    List<String> tNames = dbEvent.getHandler().get_all_tables(authzObj);
+    for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+      plugin.removeAllPaths(authzObj, tNames);
+    }
+    dropSentryDbPrivileges(dbEvent.getDatabase().getName());
     if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) {
       return;
     }
@@ -121,9 +181,6 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
   @Override
   public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
     String oldTableName = null, newTableName = null;
-    if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) {
-      return;
-    }
     // don't sync privileges if the operation has failed
     if (!tableEvent.getStatus()) {
       return;
@@ -135,10 +192,63 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
     if (tableEvent.getNewTable() != null) {
       newTableName = tableEvent.getNewTable().getTableName();
     }
-    if (!oldTableName.equalsIgnoreCase(newTableName)) {
-      renameSentryTablePrivilege(tableEvent.getOldTable().getDbName(),
-          oldTableName, tableEvent.getNewTable().getDbName(), newTableName);
+    renameSentryTablePrivilege(tableEvent.getOldTable().getDbName(),
+        oldTableName, tableEvent.getOldTable().getSd().getLocation(),
+        tableEvent.getNewTable().getDbName(), newTableName,
+        tableEvent.getNewTable().getSd().getLocation());
+  }
+
+  @Override
+  public void onAlterPartition(AlterPartitionEvent partitionEvent)
+      throws MetaException {
+    // don't sync privileges if the operation has failed
+    if (!partitionEvent.getStatus()) {
+      return;
+    }
+    String oldLoc = null, newLoc = null;
+    if (partitionEvent.getOldPartition() != null) {
+      oldLoc = partitionEvent.getOldPartition().getSd().getLocation();
+    }
+    if (partitionEvent.getNewPartition() != null) {
+      newLoc = partitionEvent.getNewPartition().getSd().getLocation();
     }
+
+    if ((oldLoc != null) && (newLoc != null) && (!oldLoc.equals(newLoc))) {
+      String authzObj =
+          partitionEvent.getOldPartition().getDbName() + "."
+              + partitionEvent.getOldPartition().getTableName();
+      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+        plugin.renameAuthzObject(authzObj, oldLoc,
+            authzObj, newLoc);
+      }
+    }
+  }
+
+  @Override
+  public void onAddPartition(AddPartitionEvent partitionEvent)
+      throws MetaException {
+    for (Partition part : partitionEvent.getPartitions()) {
+      if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
+        String authzObj = part.getDbName() + "." + part.getTableName();
+        String path = part.getSd().getLocation();
+        for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+          plugin.addPath(authzObj, path);
+        }
+      }
+    }
+    super.onAddPartition(partitionEvent);
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent)
+      throws MetaException {
+    String authzObj = partitionEvent.getTable().getDbName() + "."
+        + partitionEvent.getTable().getTableName();
+    String path = partitionEvent.getPartition().getSd().getLocation();
+    for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+      plugin.removePath(authzObj, path);
+    }
+    super.onDropPartition(partitionEvent);
   }
 
   private SentryPolicyServiceClient getSentryServiceClient()
@@ -194,7 +304,7 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
   }
 
   private void renameSentryTablePrivilege(String oldDbName, String oldTabName,
-      String newDbName, String newTabName)
+      String oldPath, String newDbName, String newTabName, String newPath)
       throws MetaException {
     List<Authorizable> oldAuthorizableTable = new ArrayList<Authorizable>();
     oldAuthorizableTable.add(server);
@@ -206,18 +316,26 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
     newAuthorizableTable.add(new Database(newDbName));
     newAuthorizableTable.add(new Table(newTabName));
 
-    try {
-      String requestorUserName = UserGroupInformation.getCurrentUser()
-          .getShortUserName();
-      SentryPolicyServiceClient sentryClient = getSentryServiceClient();
-      sentryClient.renamePrivileges(requestorUserName, oldAuthorizableTable, newAuthorizableTable);
-    } catch (SentryUserException e) {
-      throw new MetaException(
-          "Failed to remove Sentry policies for rename table " + oldDbName
-              + "." + oldTabName + "to " + newDbName + "." + newTabName
-              + " Error: " + e.getMessage());
-    } catch (IOException e) {
-      throw new MetaException("Failed to find local user " + e.getMessage());
+    if (!oldTabName.equalsIgnoreCase(newTabName)
+        && syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) {
+      try {
+        String requestorUserName = UserGroupInformation.getCurrentUser()
+            .getShortUserName();
+        SentryPolicyServiceClient sentryClient = getSentryServiceClient();
+        sentryClient.renamePrivileges(requestorUserName, oldAuthorizableTable, newAuthorizableTable);
+      } catch (SentryUserException e) {
+        throw new MetaException(
+            "Failed to remove Sentry policies for rename table " + oldDbName
+            + "." + oldTabName + "to " + newDbName + "." + newTabName
+            + " Error: " + e.getMessage());
+      } catch (IOException e) {
+        throw new MetaException("Failed to find local user " + e.getMessage());
+      }
+    }
+    // The HDFS plugin needs to know if it's a path change (set location)
+    for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
+      plugin.renameAuthzObject(oldDbName + "." + oldTabName, oldPath,
+          newDbName + "." + newTabName, newPath);
     }
   }
 
@@ -225,4 +343,5 @@ public class SentryMetastorePostEventListener extends MetaStoreEventListener {
     return "true"
         .equalsIgnoreCase((authzConf.get(syncConfVar.getVar(), "true")));
   }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-dist/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/pom.xml b/sentry-dist/pom.xml
index cd7126b..4eb1d9c 100644
--- a/sentry-dist/pom.xml
+++ b/sentry-dist/pom.xml
@@ -77,6 +77,7 @@ limitations under the License.
   </dependencies>
   <build>
     <plugins>
+
      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-assembly-plugin</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-dist/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/src/main/assembly/bin.xml b/sentry-dist/src/main/assembly/bin.xml
index 258e63c..f1d301b 100644
--- a/sentry-dist/src/main/assembly/bin.xml
+++ b/sentry-dist/src/main/assembly/bin.xml
@@ -46,6 +46,18 @@
         <exclude>org.apache.derby:derby</exclude>
       </excludes>
     </dependencySet>
+<!--
+    <dependencySet>
+      <outputDirectory>lib/plugins</outputDirectory>
+      <unpack>true</unpack>
+      <useTransitiveDependencies>false</useTransitiveDependencies>
+      <includes>
+        <include>org.apache.sentry:sentry-provider-db</include>
+        <include>org.apache.sentry:sentry-hdfs-common</include>
+        <include>org.apache.sentry:sentry-hdfs-namenode-plugin</include>
+      </includes>
+    </dependencySet>
+-->
     <dependencySet>
       <outputDirectory>lib/server</outputDirectory>
       <unpack>false</unpack>
@@ -57,7 +69,6 @@
         <include>com.jolbox:bonecp</include>
         <include>org.apache.hive:hive-beeline</include>
         <include>org.apache.derby:derby</include>
-        <include>org.apache.derby:derby</include>
       </includes>
     </dependencySet>
   </dependencySets>
@@ -80,6 +91,7 @@
         <exclude>sentry-provider/**</exclude>
         <exclude>sentry-policy/**</exclude>
         <exclude>sentry-tests/**</exclude>
+        <exclude>sentry-hdfs/**</exclude>
       </excludes>
 
       <includes>
@@ -95,6 +107,16 @@
       <outputDirectory>/</outputDirectory>
     </fileSet>
     <fileSet>
+      <directory>${project.parent.basedir}/sentry-hdfs/sentry-hdfs-dist/target</directory>
+      <includes>
+        <include>sentry-hdfs-*.jar</include>
+      </includes>
+      <excludes>
+        <exclude>sentry-hdfs-dist-*.jar</exclude>
+      </excludes>
+      <outputDirectory>lib/plugins</outputDirectory>
+    </fileSet>
+    <fileSet>
       <directory>${project.parent.basedir}/sentry-provider/sentry-provider-db/src/main/resources</directory>
       <includes>
         <include>**/*</include>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/pom.xml b/sentry-hdfs/pom.xml
new file mode 100644
index 0000000..1455235
--- /dev/null
+++ b/sentry-hdfs/pom.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs</artifactId>
+  <name>Sentry HDFS</name>
+  <packaging>pom</packaging>
+  <modules>
+    <module>sentry-hdfs-common</module>
+    <module>sentry-hdfs-service</module>
+    <module>sentry-hdfs-namenode-plugin</module>
+    <module>sentry-hdfs-dist</module>
+  </modules>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/.gitignore
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/.gitignore b/sentry-hdfs/sentry-hdfs-common/.gitignore
new file mode 100644
index 0000000..91ad75b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/.gitignore
@@ -0,0 +1,18 @@
+*.class
+target/
+.classpath
+.project
+.settings
+.metadata
+.idea/
+*.iml
+derby.log
+datanucleus.log
+sentry-core/sentry-core-common/src/gen
+**/TempStatsStore/
+# Package Files #
+*.jar
+*.war
+*.ear
+test-output/
+maven-repo/

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/pom.xml b/sentry-hdfs/sentry-hdfs-common/pom.xml
new file mode 100644
index 0000000..511bc53
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/pom.xml
@@ -0,0 +1,148 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-common</artifactId>
+  <name>Sentry HDFS Common</name>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <version>2.5.0</version>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>2.5.0</version>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
+    <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>src/gen/thrift/gen-javabean</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <profiles>
+    <profile>
+      <id>thriftif</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>generate-thrift-sources</id>
+                <phase>generate-sources</phase>
+                <configuration>
+                  <target>
+                    <taskdef name="for" classname="net.sf.antcontrib.logic.ForTask"
+                      classpathref="maven.plugin.classpath" />
+                    <property name="thrift.args" value="-I ${thrift.home} --gen java:beans,hashcode"/>
+                    <property name="thrift.gen.dir" value="${basedir}/src/gen/thrift"/>
+                    <delete dir="${thrift.gen.dir}"/>
+                    <mkdir dir="${thrift.gen.dir}"/>
+                    <for param="thrift.file">
+                      <path>
+                        <fileset dir="${basedir}/src/main/resources/" includes="**/*.thrift" />
+                      </path>
+                      <sequential>
+                        <echo message="Generating Thrift code for @{thrift.file}"/>
+                        <exec executable="${thrift.home}/bin/thrift"  failonerror="true" dir=".">
+                          <arg line="${thrift.args} -I ${basedir}/src/main/resources/ -o ${thrift.gen.dir} @{thrift.file} " />
+                        </exec>
+                      </sequential>
+                    </for>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-enforcer-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>enforce-property</id>
+                <goals>
+                  <goal>enforce</goal>
+                </goals>
+                <configuration>
+                  <rules>
+                    <requireProperty>
+                      <property>thrift.home</property>
+                    </requireProperty>
+                  </rules>
+                  <fail>true</fail>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+</project>


[2/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java
new file mode 100644
index 0000000..6b3e2e2
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/UpdateablePermissions.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+
+import org.apache.sentry.hdfs.UpdateForwarder.ExternalImageRetriever;
+
+public class UpdateablePermissions implements Updateable<PermissionsUpdate> {
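+  // Only the latest sequence number is tracked here; the permission state
+  // itself lives with the external image retriever, which can rebuild a
+  // full image on demand.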
+
+  private AtomicLong seqNum = new AtomicLong();
+  private final ExternalImageRetriever<PermissionsUpdate> imageRetriever;
+
+  public UpdateablePermissions(
+      ExternalImageRetriever<PermissionsUpdate> imageRetriever) {
+    this.imageRetriever = imageRetriever;
+  }
+
+  @Override
+  public PermissionsUpdate createFullImageUpdate(long currSeqNum) {
+    return imageRetriever.retrieveFullImage(currSeqNum);
+  }
+
+  @Override
+  public long getLastUpdatedSeqNum() {
+    return seqNum.get();
+  }
+
+  @Override
+  public void updatePartial(Iterable<PermissionsUpdate> update,
+      ReadWriteLock lock) {
+    for (PermissionsUpdate permsUpdate : update) {
+      seqNum.set(permsUpdate.getSeqNum());
+    }
+  }
+
+  @Override
+  public Updateable<PermissionsUpdate> updateFull(PermissionsUpdate update) {
+    UpdateablePermissions other = new UpdateablePermissions(imageRetriever);
+    other.seqNum.set(update.getSeqNum());
+    return other;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java
new file mode 100644
index 0000000..0c55bb1
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/test/java/org/apache/sentry/hdfs/TestUpdateForwarder.java
@@ -0,0 +1,307 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.hdfs;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
+
+import org.junit.Assert;
+
+import org.apache.sentry.hdfs.UpdateForwarder.ExternalImageRetriever;
+import org.apache.sentry.hdfs.Updateable.Update;
+import org.junit.Test;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+
+public class TestUpdateForwarder {
+  
+  static class DummyUpdate implements Update {
+    private long seqNum = 0;
+    private boolean hasFullUpdate = false;
+    private String state;
+    public DummyUpdate(long seqNum, boolean hasFullUpdate) {
+      this.seqNum = seqNum;
+      this.hasFullUpdate = hasFullUpdate;
+    }
+    public String getState() {
+      return state;
+    }
+    public DummyUpdate setState(String stuff) {
+      this.state = stuff;
+      return this;
+    }
+    @Override
+    public boolean hasFullImage() {
+      return hasFullUpdate;
+    }
+    @Override
+    public long getSeqNum() {
+      return seqNum;
+    }
+    @Override
+    public void setSeqNum(long seqNum) {
+      this.seqNum = seqNum;
+    }
+  }
+
+  static class DummyUpdatable implements Updateable<DummyUpdate> {
+    
+    private List<String> state = new LinkedList<String>();
+    private long lastUpdatedSeqNum = 0;
+
+    @Override
+    public void updatePartial(Iterable<DummyUpdate> update, ReadWriteLock lock) {
+      for (DummyUpdate u : update) {
+        state.add(u.getState());
+        lastUpdatedSeqNum = u.seqNum;
+      }
+    }
+
+    @Override
+    public Updateable<DummyUpdate> updateFull(DummyUpdate update) {
+      DummyUpdatable retVal = new DummyUpdatable();
+      retVal.lastUpdatedSeqNum = update.seqNum;
+      retVal.state = Lists.newArrayList(update.state.split(","));
+      return retVal;
+    }
+
+    @Override
+    public long getLastUpdatedSeqNum() {
+      return lastUpdatedSeqNum;
+    }
+
+    @Override
+    public DummyUpdate createFullImageUpdate(long currSeqNum) {
+      DummyUpdate retVal = new DummyUpdate(currSeqNum, true);
+      retVal.state = Joiner.on(",").join(state);
+      return retVal;
+    }
+
+    public String getState() {
+      return Joiner.on(",").join(state);
+    }
+  }
+
+  static class DummyImageRetriever implements ExternalImageRetriever<DummyUpdate> {
+
+    private String state;
+    public void setState(String state) {
+      this.state = state;
+    }
+    @Override
+    public DummyUpdate retrieveFullImage(long currSeqNum) {
+      DummyUpdate retVal = new DummyUpdate(currSeqNum, true);
+      retVal.state = state;
+      return retVal;
+    }
+  }
+
+  @Test
+  public void testInit() {
+    DummyImageRetriever imageRetriever = new DummyImageRetriever();
+    imageRetriever.setState("a,b,c");
+    UpdateForwarder<DummyUpdate> updateForwarder = new UpdateForwarder<DummyUpdate>(
+        new DummyUpdatable(), imageRetriever, 10);
+    Assert.assertEquals(-2, updateForwarder.getLastUpdatedSeqNum());
+    List<DummyUpdate> allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertTrue(allUpdates.size() == 1);
+    Assert.assertEquals("a,b,c", allUpdates.get(0).getState());
+
+    // If the current process has restarted, the input seqNum will be > currSeq
+    allUpdates = updateForwarder.getAllUpdatesFrom(100);
+    Assert.assertTrue(allUpdates.size() == 1);
+    Assert.assertEquals("a,b,c", allUpdates.get(0).getState());
+    Assert.assertEquals(-2, allUpdates.get(0).getSeqNum());
+    allUpdates = updateForwarder.getAllUpdatesFrom(-1);
+    Assert.assertEquals(0, allUpdates.size());
+  }
+
+  @Test
+  public void testUpdateReceive() throws Exception {
+    DummyImageRetriever imageRetriever = new DummyImageRetriever();
+    imageRetriever.setState("a,b,c");
+    UpdateForwarder<DummyUpdate> updateForwarder = new UpdateForwarder<DummyUpdate>(
+        new DummyUpdatable(), imageRetriever, 5);
+    updateForwarder.handleUpdateNotification(new DummyUpdate(5, false).setState("d"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(5, updateForwarder.getLastUpdatedSeqNum());
+    List<DummyUpdate> allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(2, allUpdates.size());
+    Assert.assertEquals("a,b,c", allUpdates.get(0).getState());
+    Assert.assertEquals("d", allUpdates.get(1).getState());
+  }
+
+  // This happens when the first update from HMS is a -1 (if the heartbeat
+  // thread checks Sentry's current seqNum before any update has come in),
+  // which will lead the first and second entries in the update log to
+  // differ by more than +1.
+  @Test
+  public void testUpdateReceiveWithNullImageRetriever() throws Exception {
+    UpdateForwarder<DummyUpdate> updateForwarder = new UpdateForwarder<DummyUpdate>(
+        new DummyUpdatable(), null, 5);
+    updateForwarder.handleUpdateNotification(new DummyUpdate(-1, true).setState("a"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    List<DummyUpdate> allUpdates = updateForwarder.getAllUpdatesFrom(1);
+    Assert.assertEquals("a", allUpdates.get(0).getState());
+    updateForwarder.handleUpdateNotification(new DummyUpdate(6, false).setState("b"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    updateForwarder.handleUpdateNotification(new DummyUpdate(7, false).setState("c"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(7, updateForwarder.getLastUpdatedSeqNum());
+    allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(2, allUpdates.size());
+    Assert.assertEquals("b", allUpdates.get(0).getState());
+    Assert.assertEquals("c", allUpdates.get(1).getState());
+  }
+
+  @Test
+  public void testGetUpdates() throws Exception {
+    DummyImageRetriever imageRetriever = new DummyImageRetriever();
+    imageRetriever.setState("a,b,c");
+    UpdateForwarder<DummyUpdate> updateForwarder = new UpdateForwarder<DummyUpdate>(
+        new DummyUpdatable(), imageRetriever, 5);
+    updateForwarder.handleUpdateNotification(new DummyUpdate(5, false).setState("d"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(5, updateForwarder.getLastUpdatedSeqNum());
+    List<DummyUpdate> allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(2, allUpdates.size());
+
+    updateForwarder.handleUpdateNotification(new DummyUpdate(6, false).setState("e"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(7, false).setState("f"));
+
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(7, updateForwarder.getLastUpdatedSeqNum());
+    allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(4, allUpdates.size());
+    Assert.assertEquals("a,b,c", allUpdates.get(0).getState());
+    Assert.assertEquals(4, allUpdates.get(0).getSeqNum());
+    Assert.assertEquals("d", allUpdates.get(1).getState());
+    Assert.assertEquals(5, allUpdates.get(1).getSeqNum());
+    Assert.assertEquals("e", allUpdates.get(2).getState());
+    Assert.assertEquals(6, allUpdates.get(2).getSeqNum());
+    Assert.assertEquals("f", allUpdates.get(3).getState());
+    Assert.assertEquals(7, allUpdates.get(3).getSeqNum());
+
+    updateForwarder.handleUpdateNotification(new DummyUpdate(8, false).setState("g"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(8, updateForwarder.getLastUpdatedSeqNum());
+    allUpdates = updateForwarder.getAllUpdatesFrom(8);
+    Assert.assertEquals(1, allUpdates.size());
+    Assert.assertEquals("g", allUpdates.get(0).getState());
+  }
+
+  @Test
+  public void testGetUpdatesAfterExternalEntityReset() throws Exception {
+    DummyImageRetriever imageRetriever = new DummyImageRetriever();
+    imageRetriever.setState("a,b,c");
+    UpdateForwarder<DummyUpdate> updateForwarder = new UpdateForwarder<DummyUpdate>(
+        new DummyUpdatable(), imageRetriever, 5);
+    updateForwarder.handleUpdateNotification(new DummyUpdate(5, false).setState("d"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+
+    updateForwarder.handleUpdateNotification(new DummyUpdate(6, false).setState("e"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(7, false).setState("f"));
+
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(7, updateForwarder.getLastUpdatedSeqNum());
+    List<DummyUpdate> allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(4, allUpdates.size());
+    Assert.assertEquals("f", allUpdates.get(3).getState());
+    Assert.assertEquals(7, allUpdates.get(3).getSeqNum());
+
+    updateForwarder.handleUpdateNotification(new DummyUpdate(8, false).setState("g"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(8, updateForwarder.getLastUpdatedSeqNum());
+    allUpdates = updateForwarder.getAllUpdatesFrom(8);
+    Assert.assertEquals(1, allUpdates.size());
+    Assert.assertEquals("g", allUpdates.get(0).getState());
+
+    imageRetriever.setState("a,b,c,d,e,f,g,h");
+
+    // New update comes with SeqNum = 1
+    updateForwarder.handleUpdateNotification(new DummyUpdate(1, false).setState("h"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    // NN plugin asks for next update
+    allUpdates = updateForwarder.getAllUpdatesFrom(9);
+    Assert.assertEquals(1, allUpdates.size());
+    Assert.assertEquals("a,b,c,d,e,f,g,h", allUpdates.get(0).getState());
+    Assert.assertEquals(1, allUpdates.get(0).getSeqNum());
+  }
+
+  @Test
+  public void testUpdateLogCompression() throws Exception {
+    DummyImageRetreiver imageRetreiver = new DummyImageRetreiver();
+    imageRetreiver.setState("a,b,c");
+    UpdateForwarder<DummyUpdate> updateForwarder = new UpdateForwarder<DummyUpdate>(
+        new DummyUpdatable(), imageRetreiver, 5);
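+    // The last constructor argument caps the in-memory update log (5 here);
+    // once it overflows, older deltas are folded into a full image, which is
+    // what this test exercises.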
+    updateForwarder.handleUpdateNotification(new DummyUpdate(5, false).setState("d"));
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(5, updateForwarder.getLastUpdatedSeqNum());
+    List<DummyUpdate> allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(2, allUpdates.size());
+
+    updateForwarder.handleUpdateNotification(new DummyUpdate(6, false).setState("e"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(7, false).setState("f"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(8, false).setState("g"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(9, false).setState("h"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(10, false).setState("i"));
+    updateForwarder.handleUpdateNotification(new DummyUpdate(11, false).setState("j"));
+
+    while(!updateForwarder.areAllUpdatesCommited()) {
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(11, updateForwarder.getLastUpdatedSeqNum());
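+    // Everything up to seqNum 9 should now have been compacted into a single
+    // full-image entry ("a,b,c,d,e,f,g,h"), leaving just the two newest deltas.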
+    allUpdates = updateForwarder.getAllUpdatesFrom(0);
+    Assert.assertEquals(3, allUpdates.size());
+    Assert.assertEquals("a,b,c,d,e,f,g,h", allUpdates.get(0).getState());
+    Assert.assertEquals(9, allUpdates.get(0).getSeqNum());
+    Assert.assertEquals("i", allUpdates.get(1).getState());
+    Assert.assertEquals(10, allUpdates.get(1).getSeqNum());
+    Assert.assertEquals("j", allUpdates.get(2).getState());
+    Assert.assertEquals(11, allUpdates.get(2).getSeqNum());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/pom.xml b/sentry-provider/sentry-provider-db/pom.xml
index fbf831a..e2f035f 100644
--- a/sentry-provider/sentry-provider-db/pom.xml
+++ b/sentry-provider/sentry-provider-db/pom.xml
@@ -42,6 +42,11 @@ limitations under the License.
       <scope>provided</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <version>2.5.0</version>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
@@ -89,6 +94,11 @@ limitations under the License.
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>0.13.1-cdh5.2.0-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
       <artifactId>hive-shims</artifactId>
       <scope>provided</scope>
     </dependency>
@@ -163,6 +173,11 @@ limitations under the License.
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+      <dependency>
+        <groupId>org.apache.hive</groupId>
+        <artifactId>hive-metastore</artifactId>
+        <version>${hive.version}</version>
+      </dependency>
   </dependencies>
 
   <build>
@@ -214,68 +229,5 @@ limitations under the License.
       </plugin>
     </plugins>
   </build>
-  <profiles>
-    <profile>
-      <id>thriftif</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>generate-thrift-sources</id>
-                <phase>generate-sources</phase>
-                <configuration>
-                  <target>
-                    <taskdef name="for" classname="net.sf.antcontrib.logic.ForTask"
-                      classpathref="maven.plugin.classpath" />
-                    <property name="thrift.args" value="-I ${thrift.home} --gen java:beans,hashcode"/>
-                    <property name="thrift.gen.dir" value="${basedir}/src/gen/thrift"/>
-                    <delete dir="${thrift.gen.dir}"/>
-                    <mkdir dir="${thrift.gen.dir}"/>
-                    <for param="thrift.file">
-                      <path>
-                        <fileset dir="${basedir}/src/main/resources/" includes="**/*.thrift" />
-                      </path>
-                      <sequential>
-                        <echo message="Generating Thrift code for @{thrift.file}"/>
-                        <exec executable="${thrift.home}/bin/thrift"  failonerror="true" dir=".">
-                          <arg line="${thrift.args} -I ${basedir}/src/main/resources/ -o ${thrift.gen.dir} @{thrift.file} " />
-                        </exec>
-                      </sequential>
-                    </for>
-                  </target>
-                </configuration>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-enforcer-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>enforce-property</id>
-                <goals>
-                  <goal>enforce</goal>
-                </goals>
-                <configuration>
-                  <rules>
-                    <requireProperty>
-                      <property>thrift.home</property>
-                    </requireProperty>
-                  </rules>
-                  <fail>true</fail>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
 
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryMetastoreListenerPlugin.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryMetastoreListenerPlugin.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryMetastoreListenerPlugin.java
new file mode 100644
index 0000000..79cf4a4
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryMetastoreListenerPlugin.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.provider.db;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Plugin interface providing hooks that are invoked on path creation, update,
+ * and deletion in the metastore
+ */
+public abstract class SentryMetastoreListenerPlugin {
+  
+  private static List<SentryMetastoreListenerPlugin> registry = new LinkedList<SentryMetastoreListenerPlugin>();
+  
+  public static void addToRegistry(SentryMetastoreListenerPlugin plugin) {
+    registry.add(plugin);
+  }
+
+  public static List<SentryMetastoreListenerPlugin> getPlugins() {
+    return registry;
+  }
+
+  public abstract void renameAuthzObject(String oldName, String oldPath,
+      String newName, String newPath);
+  
+  public abstract void addPath(String authzObj, String path);
+
+  public abstract void removePath(String authzObj, String path);
+
+  public abstract void removeAllPaths(String authzObj, List<String> childObjects);
+
+}

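A concrete listener only needs to extend the class above, implement the four
path callbacks, and register itself if it should be reachable through
getPlugins(). A minimal sketch, assuming only the abstract class from this
patch (the class name and log output are illustrative, not part of the
change):

    import java.util.List;

    import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin;

    // Hypothetical listener that only logs path changes.
    public class LoggingListenerPlugin extends SentryMetastoreListenerPlugin {

      public LoggingListenerPlugin() {
        // Make this instance visible to callers of getPlugins().
        SentryMetastoreListenerPlugin.addToRegistry(this);
      }

      @Override
      public void renameAuthzObject(String oldName, String oldPath,
          String newName, String newPath) {
        System.out.println("rename " + oldName + " -> " + newName);
      }

      @Override
      public void addPath(String authzObj, String path) {
        System.out.println("add " + path + " for " + authzObj);
      }

      @Override
      public void removePath(String authzObj, String path) {
        System.out.println("remove " + path + " for " + authzObj);
      }

      @Override
      public void removeAllPaths(String authzObj, List<String> childObjects) {
        System.out.println("remove all paths for " + authzObj);
      }
    }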
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java
new file mode 100644
index 0000000..998a48b
--- /dev/null
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SentryPolicyStorePlugin.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.provider.db;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.SentryUserException;
+import org.apache.sentry.provider.db.service.persistent.SentryStore;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleGrantPrivilegeRequest;
+import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivilegeRequest;
+import org.apache.sentry.provider.db.service.thrift.TDropPrivilegesRequest;
+import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleRequest;
+import org.apache.sentry.provider.db.service.thrift.TRenamePrivilegesRequest;
+
+public interface SentryPolicyStorePlugin {
+
+  @SuppressWarnings("serial")
+  public static class SentryPluginException extends SentryUserException {
+    public SentryPluginException(String msg) {
+      super(msg);
+    }
+    public SentryPluginException(String msg, Throwable t) {
+      super(msg, t);
+    }
+  }
+
+  public void initialize(Configuration conf, SentryStore sentryStore) throws SentryPluginException;
+
+  public void onAlterSentryRoleAddGroups(TAlterSentryRoleAddGroupsRequest tRequest) throws SentryPluginException;
+
+  public void onAlterSentryRoleDeleteGroups(TAlterSentryRoleDeleteGroupsRequest tRequest) throws SentryPluginException;
+
+  public void onAlterSentryRoleGrantPrivilege(TAlterSentryRoleGrantPrivilegeRequest tRequest) throws SentryPluginException;
+
+  public void onAlterSentryRoleRevokePrivilege(TAlterSentryRoleRevokePrivilegeRequest tRequest) throws SentryPluginException;
+
+  public void onDropSentryRole(TDropSentryRoleRequest tRequest) throws SentryPluginException;
+
+  public void onRenameSentryPrivilege(TRenamePrivilegesRequest request) throws SentryPluginException;
+
+  public void onDropSentryPrivilege(TDropPrivilegesRequest request) throws SentryPluginException;
+
+}

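The Sentry server calls these hooks with the raw Thrift request objects after
the corresponding store mutation. A no-op sketch of the contract, assuming
only the interface above (the class name is hypothetical), would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.sentry.provider.db.SentryPolicyStorePlugin;
    import org.apache.sentry.provider.db.service.persistent.SentryStore;
    import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleAddGroupsRequest;
    import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleDeleteGroupsRequest;
    import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleGrantPrivilegeRequest;
    import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivilegeRequest;
    import org.apache.sentry.provider.db.service.thrift.TDropPrivilegesRequest;
    import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleRequest;
    import org.apache.sentry.provider.db.service.thrift.TRenamePrivilegesRequest;

    // Hypothetical plugin that accepts every notification without acting.
    public class NoOpPolicyStorePlugin implements SentryPolicyStorePlugin {

      @Override
      public void initialize(Configuration conf, SentryStore sentryStore) {
        // Nothing to set up in this sketch.
      }

      @Override
      public void onAlterSentryRoleAddGroups(TAlterSentryRoleAddGroupsRequest tRequest) {}

      @Override
      public void onAlterSentryRoleDeleteGroups(TAlterSentryRoleDeleteGroupsRequest tRequest) {}

      @Override
      public void onAlterSentryRoleGrantPrivilege(TAlterSentryRoleGrantPrivilegeRequest tRequest) {}

      @Override
      public void onAlterSentryRoleRevokePrivilege(TAlterSentryRoleRevokePrivilegeRequest tRequest) {}

      @Override
      public void onDropSentryRole(TDropSentryRoleRequest tRequest) {}

      @Override
      public void onRenameSentryPrivilege(TRenamePrivilegesRequest request) {}

      @Override
      public void onDropSentryPrivilege(TDropPrivilegesRequest request) {}
    }

Plugins also need a public no-arg constructor, since the server instantiates
them reflectively by class name (see SentryPolicyStoreProcessor below).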
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java
index b66037a..5f34b4c 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/SimpleDBProviderBackend.java
@@ -39,9 +39,10 @@ public class SimpleDBProviderBackend implements ProviderBackend {
   private static final Logger LOGGER = LoggerFactory
       .getLogger(SimpleDBProviderBackend.class);
 
-  private final SentryPolicyServiceClient policyServiceClient;
+  private SentryPolicyServiceClient policyServiceClient;
 
   private volatile boolean initialized;
+  private Configuration conf; 
 
   public SimpleDBProviderBackend(Configuration conf, String resourcePath) throws IOException {
     // DB Provider doesn't use policy file path
@@ -50,6 +51,8 @@ public class SimpleDBProviderBackend implements ProviderBackend {
 
   public SimpleDBProviderBackend(Configuration conf) throws IOException {
     this(new SentryPolicyServiceClient(conf));
+    this.initialized = false;
+    this.conf = conf;
   }
 
   @VisibleForTesting
@@ -74,14 +77,28 @@ public class SimpleDBProviderBackend implements ProviderBackend {
    */
   @Override
   public ImmutableSet<String> getPrivileges(Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) {
+    return getPrivileges(1, groups, roleSet, authorizableHierarchy);
+  }
+
+  private ImmutableSet<String> getPrivileges(int retryCount, Set<String> groups, ActiveRoleSet roleSet, Authorizable... authorizableHierarchy) {
     if (!initialized) {
       throw new IllegalStateException("Backend has not been properly initialized");
     }
     try {
-      return ImmutableSet.copyOf(policyServiceClient.listPrivilegesForProvider(groups, roleSet, authorizableHierarchy));
-    } catch (SentryUserException e) {
-      String msg = "Unable to obtain privileges from server: " + e.getMessage();
-      LOGGER.error(msg, e);
+      return ImmutableSet.copyOf(getSentryClient().listPrivilegesForProvider(groups, roleSet, authorizableHierarchy));
+    } catch (Exception e) {
+      // Close the (possibly broken) client before dropping the reference,
+      // so a retry reconnects with a fresh one.
+      try {
+        if (policyServiceClient != null) {
+          policyServiceClient.close();
+        }
+      } catch (Exception ex2) {
+        // Ignore
+      }
+      policyServiceClient = null;
+      if (retryCount > 0) {
+        return getPrivileges(retryCount - 1, groups, roleSet, authorizableHierarchy);
+      } else {
+        String msg = "Unable to obtain privileges from server: " + e.getMessage();
+        LOGGER.error(msg, e);
+      }
     }
     return ImmutableSet.of();
   }
@@ -101,6 +118,19 @@ public class SimpleDBProviderBackend implements ProviderBackend {
     }
   }
 
+  private SentryPolicyServiceClient getSentryClient() {
+    if (policyServiceClient == null) {
+      try {
+        policyServiceClient = new SentryPolicyServiceClient(conf);
+      } catch (Exception e) {
+        LOGGER.error("Error connecting to Sentry: {}", e.getMessage());
+        policyServiceClient = null;
+        return null;
+      }
+    }
+    return policyServiceClient;
+  }
+
   /**
    * SimpleDBProviderBackend does not implement validatePolicy()
    */

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java
index f6699d2..743900b 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/persistent/SentryStore.java
@@ -24,6 +24,7 @@ import static org.apache.sentry.provider.common.ProviderConstants.KV_JOINER;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -86,6 +87,7 @@ public class SentryStore {
 
   public static String NULL_COL = "__NULL__";
   static final String DEFAULT_DATA_DIR = "sentry_policy_db";
+
   /**
    * Commit order sequence id. This is used by notification handlers
    * to know the order in which events where committed to the database.
@@ -762,7 +764,6 @@ public class SentryStore {
     }
   }
 
-
   List<MSentryPrivilege> getMSentryPrivileges(Set<String> roleNames, TSentryAuthorizable authHierarchy) {
     if (roleNames == null || roleNames.isEmpty()) return new ArrayList<MSentryPrivilege>();
     boolean rollbackTransaction = true;
@@ -1506,4 +1507,82 @@ public class SentryStore {
     return Sets.newHashSet(conf.getStrings(
         ServerConfig.ADMIN_GROUPS, new String[]{}));
   }
+
+  /**
+   * Returns a mapping of authorizable object (db or db.table) to a map of
+   * role name to its comma-separated, upper-cased actions.
+   */
+  public Map<String, HashMap<String, String>> retrieveFullPrivilegeImage() {
+    Map<String, HashMap<String, String>> retVal = new HashMap<String, HashMap<String,String>>();
+    boolean rollbackTransaction = true;
+    PersistenceManager pm = null;
+    try {
+      pm = openTransaction();
+      Query query = pm.newQuery(MSentryPrivilege.class);
+      String filters = "(serverName != \"__NULL__\") "
+          + "&& (dbName != \"__NULL__\") "
+          + "&& (URI == \"__NULL__\")";
+      query.setFilter(filters);
+      query.setOrdering(
+          "serverName ascending, dbName ascending, tableName ascending");
+      List<MSentryPrivilege> privileges =
+          (List<MSentryPrivilege>) query.execute();
+      rollbackTransaction = false;
+      for (MSentryPrivilege mPriv : privileges) {
+        String authzObj = mPriv.getDbName();
+        if (!isNULL(mPriv.getTableName())) {
+          authzObj = authzObj + "." + mPriv.getTableName();
+        }
+        HashMap<String, String> pUpdate = retVal.get(authzObj);
+        if (pUpdate == null) {
+          pUpdate = new HashMap<String, String>();
+          retVal.put(authzObj, pUpdate);
+        }
+        for (MSentryRole mRole : mPriv.getRoles()) {
+          String existingPriv = pUpdate.get(mRole.getRoleName());
+          if (existingPriv == null) {
+            pUpdate.put(mRole.getRoleName(), mPriv.getAction().toUpperCase());
+          } else {
+            pUpdate.put(mRole.getRoleName(), existingPriv + ","
+                + mPriv.getAction().toUpperCase());
+          }
+        }
+      }
+      commitTransaction(pm);
+      return retVal;
+    } finally {
+      if (rollbackTransaction) {
+        rollbackTransaction(pm);
+      }
+    }
+  }
+
+  /**
+   * Returns a mapping of role name to the list of group names the role has
+   * been granted to.
+   */
+  public Map<String, LinkedList<String>> retrieveFullRoleImage() {
+    Map<String, LinkedList<String>> retVal = new HashMap<String, LinkedList<String>>();
+    boolean rollbackTransaction = true;
+    PersistenceManager pm = null;
+    try {
+      pm = openTransaction();
+      Query query = pm.newQuery(MSentryGroup.class);
+      List<MSentryGroup> groups = (List<MSentryGroup>) query.execute();
+      for (MSentryGroup mGroup : groups) {
+        for (MSentryRole role : mGroup.getRoles()) {
+          LinkedList<String> rUpdate = retVal.get(role.getRoleName());
+          if (rUpdate == null) {
+            rUpdate = new LinkedList<String>();
+            retVal.put(role.getRoleName(), rUpdate);
+          }
+          rUpdate.add(mGroup.getGroupName());
+        }
+      }
+      commitTransaction(pm);
+      return retVal;
+    } finally {
+      if (rollbackTransaction) {
+        rollbackTransaction(pm);
+      }
+    }
+  }
+
 }

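Both snapshot methods return plain collections, so a consumer can walk them
directly. A hypothetical caller (sentryStore stands for an initialized
SentryStore) might flatten the two images like this:

    // Authorizable object ("db" or "db.table") -> (role -> "ACTION1,ACTION2")
    Map<String, HashMap<String, String>> privImage =
        sentryStore.retrieveFullPrivilegeImage();
    for (Map.Entry<String, HashMap<String, String>> obj : privImage.entrySet()) {
      for (Map.Entry<String, String> rolePerm : obj.getValue().entrySet()) {
        System.out.println(obj.getKey() + " : " + rolePerm.getKey()
            + " -> " + rolePerm.getValue());
      }
    }

    // Role -> groups the role is granted to
    Map<String, LinkedList<String>> roleImage =
        sentryStore.retrieveFullRoleImage();
    for (Map.Entry<String, LinkedList<String>> role : roleImage.entrySet()) {
      System.out.println("role " + role.getKey() + " -> " + role.getValue());
    }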
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java
index b20e71e..4774b90 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/provider/db/service/thrift/SentryPolicyStoreProcessor.java
@@ -18,16 +18,26 @@
 
 package org.apache.sentry.provider.db.service.thrift;
 
+import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.regex.Pattern;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.codahale.metrics.Timer;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.sentry.SentryUserException;
 import org.apache.sentry.core.model.db.AccessConstants;
 import org.apache.sentry.provider.common.GroupMappingService;
@@ -35,12 +45,16 @@ import org.apache.sentry.provider.db.SentryAccessDeniedException;
 import org.apache.sentry.provider.db.SentryAlreadyExistsException;
 import org.apache.sentry.provider.db.SentryInvalidInputException;
 import org.apache.sentry.provider.db.SentryNoSuchObjectException;
+import org.apache.sentry.provider.db.SentryPolicyStorePlugin;
+import org.apache.sentry.provider.db.SentryPolicyStorePlugin.SentryPluginException;
 import org.apache.sentry.provider.db.log.entity.JsonLogEntityFactory;
 import org.apache.sentry.provider.db.log.util.Constants;
 import org.apache.sentry.provider.db.service.persistent.CommitContext;
 import org.apache.sentry.provider.db.service.persistent.SentryStore;
 import org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants.PolicyStoreServerConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ConfUtilties;
 import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.sentry.service.thrift.ProcessorFactory;
 import org.apache.sentry.service.thrift.Status;
 import org.apache.sentry.service.thrift.TSentryResponseStatus;
 import org.apache.thrift.TException;
@@ -62,6 +76,8 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
 
   public static final String SENTRY_POLICY_SERVICE_NAME = "SentryPolicyService";
 
+  public static volatile SentryPolicyStoreProcessor instance;
+
   private final String name;
   private final Configuration conf;
   private final SentryStore sentryStore;
@@ -70,6 +86,8 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
   private boolean isReady;
   SentryMetrics sentryMetrics;
 
+  private List<SentryPolicyStorePlugin> sentryPlugins = new LinkedList<SentryPolicyStorePlugin>();
+
   public SentryPolicyStoreProcessor(String name, Configuration conf) throws Exception {
     super();
     this.name = name;
@@ -81,6 +99,23 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
     isReady = true;
     adminGroups = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(conf.getStrings(
         ServerConfig.ADMIN_GROUPS, new String[]{}))));
+    Iterable<String> pluginClasses = ConfUtilties.CLASS_SPLITTER
+        .split(conf.get(ServerConfig.SENTRY_POLICY_STORE_PLUGINS,
+            ServerConfig.SENTRY_POLICY_STORE_PLUGINS_DEFAULT).trim());
+    for (String pluginClassStr : pluginClasses) {
+      Class<?> clazz = conf.getClassByName(pluginClassStr);
+      if (!SentryPolicyStorePlugin.class.isAssignableFrom(clazz)) {
+        throw new IllegalArgumentException("Sentry Plugin ["
+            + pluginClassStr + "] is not a "
+            + SentryPolicyStorePlugin.class.getName());
+      }
+      SentryPolicyStorePlugin plugin = (SentryPolicyStorePlugin)clazz.newInstance();
+      plugin.initialize(conf, sentryStore);
+      sentryPlugins.add(plugin);
+    }
+    if (instance == null) {
+      instance = this;
+    }
     initMetrics();
   }
 
@@ -108,6 +143,11 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
     }
   }
 
+  public void registerPlugin(SentryPolicyStorePlugin plugin) throws SentryPluginException {
+    plugin.initialize(conf, sentryStore);
+    sentryPlugins.add(plugin);
+  }
+
   @VisibleForTesting
   static List<NotificationHandler> createHandlers(Configuration conf)
   throws SentryConfigurationException {
@@ -211,6 +251,9 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       response.setPrivilege(request.getPrivilege());
       notificationHandlerInvoker.alter_sentry_role_grant_privilege(commitContext,
           request, response);
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onAlterSentryRoleGrantPrivilege(request);
+      }
     } catch (SentryNoSuchObjectException e) {
       String msg = "Role: " + request.getRoleName() + " doesn't exist.";
       LOGGER.error(msg, e);
@@ -246,12 +289,15 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       response.setStatus(Status.OK());
       notificationHandlerInvoker.alter_sentry_role_revoke_privilege(commitContext,
           request, response);
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onAlterSentryRoleRevokePrivilege(request);
+      }
     } catch (SentryNoSuchObjectException e) {
       String msg = "Privilege: [server=" + request.getPrivilege().getServerName() +
-    		  ",db=" + request.getPrivilege().getDbName() +
-    		  ",table=" + request.getPrivilege().getTableName() +
-    		  ",URI=" + request.getPrivilege().getURI() +
-    		  ",action=" + request.getPrivilege().getAction() + "] doesn't exist.";
+              ",db=" + request.getPrivilege().getDbName() +
+              ",table=" + request.getPrivilege().getTableName() +
+              ",URI=" + request.getPrivilege().getURI() +
+              ",action=" + request.getPrivilege().getAction() + "] doesn't exist.";
       LOGGER.error(msg, e);
       response.setStatus(Status.NoSuchObject(msg, e));
     } catch (SentryInvalidInputException e) {
@@ -287,6 +333,9 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       response.setStatus(Status.OK());
       notificationHandlerInvoker.drop_sentry_role(commitContext,
           request, response);
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onDropSentryRole(request);
+      }
     } catch (SentryNoSuchObjectException e) {
       String msg = "Role :" + request + " does not exist.";
       LOGGER.error(msg, e);
@@ -320,6 +369,9 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       response.setStatus(Status.OK());
       notificationHandlerInvoker.alter_sentry_role_add_groups(commitContext,
           request, response);
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onAlterSentryRoleAddGroups(request);
+      }
     } catch (SentryNoSuchObjectException e) {
       String msg = "Role: " + request + " does not exist.";
       LOGGER.error(msg, e);
@@ -353,6 +405,9 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       response.setStatus(Status.OK());
       notificationHandlerInvoker.alter_sentry_role_delete_groups(commitContext,
           request, response);
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onAlterSentryRoleDeleteGroups(request);
+      }
     } catch (SentryNoSuchObjectException e) {
       String msg = "Role: " + request + " does not exist.";
       LOGGER.error(msg, e);
@@ -548,7 +603,10 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
     try {
       authorize(request.getRequestorUserName(), adminGroups);
       sentryStore.dropPrivilege(request.getAuthorizable());
-      response.setStatus(Status.OK());
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onDropSentryPrivilege(request);
+      }
+      response.setStatus(Status.OK()); 
     } catch (SentryAccessDeniedException e) {
       LOGGER.error(e.getMessage(), e);
       response.setStatus(Status.AccessDenied(e.getMessage(), e));
@@ -572,6 +630,9 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       authorize(request.getRequestorUserName(), adminGroups);
       sentryStore.renamePrivilege(request.getOldAuthorizable(),
           request.getNewAuthorizable());
+      for (SentryPolicyStorePlugin plugin : sentryPlugins) {
+        plugin.onRenameSentryPrivilege(request);
+      }
       response.setStatus(Status.OK());
     } catch (SentryAccessDeniedException e) {
       LOGGER.error(e.getMessage(), e);
@@ -633,6 +694,7 @@ public class SentryPolicyStoreProcessor implements SentryPolicyService.Iface {
       }
       response.setPrivilegesMapByAuth(authRoleMap);
       response.setStatus(Status.OK());
+      // TODO : Sentry - HDFS : Have to handle this
     } catch (SentryAccessDeniedException e) {
       LOGGER.error(e.getMessage(), e);
       response.setStatus(Status.AccessDenied(e.getMessage(), e));

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
index 1e20ff1..b19b79c 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/SentryService.java
@@ -54,6 +54,7 @@ import org.apache.sentry.service.thrift.ServiceConstants.ConfUtilties;
 import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
 import org.apache.thrift.TMultiplexedProcessor;
 import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
 import org.apache.thrift.server.TServer;
 import org.apache.thrift.server.TThreadPoolServer;
 import org.apache.thrift.transport.TSaslServerTransport;

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java
index bc86963..03ed378 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/ServiceConstants.java
@@ -67,6 +67,13 @@ public class ServiceConstants {
     public static final String RPC_MIN_THREADS = "sentry.service.server-min-threads";
     public static final int RPC_MIN_THREADS_DEFAULT = 10;
     public static final String ALLOW_CONNECT = "sentry.service.allow.connect";
+    
+    public static final String SENTRY_POLICY_STORE_PLUGINS = "sentry.policy.store.plugins";
+    public static final String SENTRY_POLICY_STORE_PLUGINS_DEFAULT = "";
+
+    public static final String SENTRY_METASTORE_PLUGINS = "sentry.metastore.plugins";
+    public static final String SENTRY_METASTORE_PLUGINS_DEFAULT = "";
+
     public static final String PROCESSOR_FACTORIES = "sentry.service.processor.factories";
     public static final String PROCESSOR_FACTORIES_DEFAULT =
         "org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessorFactory";

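A plugin is activated by listing its class name under the new key; the HDFS
integration test below does exactly this with
org.apache.sentry.hdfs.SentryPlugin. Schematically, with a hypothetical
plugin class:

    // Hypothetical server-side wiring of a policy store plugin.
    Configuration sentryConf = new Configuration(false);
    sentryConf.set(ServerConfig.SENTRY_POLICY_STORE_PLUGINS,
        "org.example.NoOpPolicyStorePlugin"); // comma-separated class names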
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
index 46f8fb8..ea4e967 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryPolicyStoreProcessor.java
@@ -21,6 +21,7 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.sentry.provider.db.service.thrift.PolicyStoreConstants.PolicyStoreServerConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
 import org.junit.Before;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
index e5238a6..777c6d8 100644
--- a/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
+++ b/sentry-provider/sentry-provider-db/src/test/java/org/apache/sentry/provider/db/service/thrift/TestSentryServerWithoutKerberos.java
@@ -18,7 +18,6 @@
 
 package org.apache.sentry.provider.db.service.thrift;
 import static junit.framework.Assert.assertEquals;
-import static org.junit.Assert.assertEquals;
 
 import java.util.HashSet;
 import java.util.Set;
@@ -77,7 +76,6 @@ public class TestSentryServerWithoutKerberos extends SentryServiceIntegrationBas
     client.grantTablePrivilege(requestorUserName, roleName1, "server", "db2", "table3", "ALL");
     client.grantTablePrivilege(requestorUserName, roleName1, "server", "db2", "table4", "ALL");
 
-
     client.dropRoleIfExists(requestorUserName, roleName2);
     client.createRole(requestorUserName, roleName2);
     client.grantRoleToGroup(requestorUserName, group1, roleName2);
@@ -89,6 +87,7 @@ public class TestSentryServerWithoutKerberos extends SentryServiceIntegrationBas
     client.grantTablePrivilege(requestorUserName, roleName2, "server", "db2", "table4", "ALL");
     client.grantTablePrivilege(requestorUserName, roleName2, "server", "db3", "table5", "ALL");
 
+
     Set<TSentryPrivilege> listPrivilegesByRoleName = client.listPrivilegesByRoleName(requestorUserName, roleName2, Lists.newArrayList(new Server("server"), new Database("db1")));
     assertEquals("Privilege not assigned to role2 !!", 2, listPrivilegesByRoleName.size());
 
@@ -162,4 +161,5 @@ public class TestSentryServerWithoutKerberos extends SentryServiceIntegrationBas
     assertEquals(0, client.listPrivilegesForProvider(requestorUserGroupNames,
             ActiveRoleSet.ALL).size());
   }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-tests/sentry-tests-hive/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/pom.xml b/sentry-tests/sentry-tests-hive/pom.xml
index 10415fc..a3c3295 100644
--- a/sentry-tests/sentry-tests-hive/pom.xml
+++ b/sentry-tests/sentry-tests-hive/pom.xml
@@ -222,6 +222,21 @@ limitations under the License.
     </dependency>
     <dependency>
       <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-service</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
       <artifactId>sentry-policy-db</artifactId>
       <scope>test</scope>
     </dependency>
@@ -229,12 +244,14 @@ limitations under the License.
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
       <scope>test</scope>
+<!--
       <exclusions>
           <exclusion>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
           </exclusion>
       </exclusions>
+-->
     </dependency>
       <dependency>
           <groupId>org.hamcrest</groupId>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
new file mode 100644
index 0000000..a488c94
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
@@ -0,0 +1,787 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.hdfs;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.ServerSocket;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.Mapper;
+import org.apache.hadoop.mapred.MiniMRClientCluster;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.Reducer;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.mapred.TextOutputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl;
+import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
+import org.apache.sentry.hdfs.SentryAuthorizationProvider;
+import org.apache.sentry.provider.db.SimpleDBProviderBackend;
+import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
+import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.service.thrift.SentryService;
+import org.apache.sentry.service.thrift.SentryServiceFactory;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
+import org.apache.sentry.tests.e2e.hive.fs.MiniDFS;
+import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory;
+import org.apache.sentry.tests.e2e.hive.hiveserver.InternalHiveServer;
+import org.apache.sentry.tests.e2e.hive.hiveserver.InternalMetastoreServer;
+import org.fest.reflect.core.Reflection;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+import com.google.common.io.Files;
+import com.google.common.io.Resources;
+
+public class TestHDFSIntegration {
+
+  public static class WordCountMapper extends MapReduceBase implements
+      Mapper<LongWritable, Text, Text, LongWritable> {
+
+    public void map(LongWritable key, Text value,
+        OutputCollector<Text, LongWritable> output, Reporter reporter)
+        throws IOException {
+      StringTokenizer st = new StringTokenizer(value.toString());
+      while (st.hasMoreTokens()) {
+        // Emit Writable types so Hadoop can serialize the intermediate data.
+        output.collect(new Text(st.nextToken()), new LongWritable(1));
+      }
+    }
+
+  }
+
+  public static class SumReducer extends MapReduceBase implements
+      Reducer<Text, LongWritable, Text, LongWritable> {
+
+    public void reduce(Text key, Iterator<LongWritable> values,
+        OutputCollector<Text, LongWritable> output, Reporter reporter)
+        throws IOException {
+
+      long sum = 0;
+      while (values.hasNext()) {
+        sum += values.next().get();
+      }
+      output.collect(key, new LongWritable(sum));
+    }
+
+  }
+
+  private static final int NUM_RETRIES = 10;
+  private static final int RETRY_WAIT = 1000;
+
+  private MiniDFSCluster miniDFS;
+  private MiniMRClientCluster miniMR;
+  private InternalHiveServer hiveServer2;
+  private InternalMetastoreServer metastore;
+  private SentryService sentryService;
+  private String fsURI;
+  private int hmsPort;
+  private int sentryPort = -1;
+  private File baseDir;
+  private File policyFileLocation;
+  private UserGroupInformation adminUgi;
+  private UserGroupInformation hiveUgi;
+
+  protected static File assertCreateDir(File dir) {
+    if(!dir.isDirectory()) {
+      Assert.assertTrue("Failed creating " + dir, dir.mkdirs());
+    }
+    return dir;
+  }
+
+  private static int findPort() throws IOException {
+    ServerSocket socket = new ServerSocket(0);
+    int port = socket.getLocalPort();
+    socket.close();
+    return port;
+  }
+
+  private void waitOnSentryService() throws Exception {
+    sentryService.start();
+    final long start = System.currentTimeMillis();
+    while (!sentryService.isRunning()) {
+      Thread.sleep(1000);
+      if (System.currentTimeMillis() - start > 60000L) {
+        throw new TimeoutException("Server did not start after 60 seconds");
+      }
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    Class.forName("org.apache.hive.jdbc.HiveDriver");
+    baseDir = Files.createTempDir();
+    policyFileLocation = new File(baseDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME);
+    PolicyFile policyFile = PolicyFile.setAdminOnServer1("hive")
+        .setUserGroupMapping(StaticUserGroup.getStaticMapping());
+    policyFile.write(policyFileLocation);
+
+    adminUgi = UserGroupInformation.createUserForTesting(
+        System.getProperty("user.name"), new String[] { "supergroup" });
+
+    hiveUgi = UserGroupInformation.createUserForTesting(
+        "hive", new String[] { "hive" });
+
+    // Start Sentry
+    startSentry();
+
+    // Start HDFS and MR
+    startDFSandYARN();
+
+    // Start HiveServer2 and Metastore
+    startHiveAndMetastore();
+
+  }
+
+  private void startHiveAndMetastore() throws IOException, InterruptedException {
+    hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        HiveConf hiveConf = new HiveConf();
+        hiveConf.set("sentry.metastore.plugins", "org.apache.sentry.hdfs.MetastorePlugin");
+        hiveConf.set("sentry.service.client.server.rpc-address", "localhost");
+        hiveConf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
+        hiveConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
+        hiveConf.set("sentry.service.client.server.rpc-port", String.valueOf(sentryPort));
+//        hiveConf.set("sentry.service.server.compact.transport", "true");
+//        hiveConf.set("sentry.service.client.compact.transport", "true");
+        hiveConf.set("sentry.service.security.mode", "none");
+        hiveConf.set("sentry.hdfs.service.security.mode", "none");
+        hiveConf.set("sentry.hdfs.init.update.retry.delay.ms", "500");
+        hiveConf.set("sentry.hive.provider.backend", "org.apache.sentry.provider.db.SimpleDBProviderBackend");
+        hiveConf.set("sentry.provider", LocalGroupResourceAuthorizationProvider.class.getName());
+        hiveConf.set("sentry.hive.provider", LocalGroupResourceAuthorizationProvider.class.getName());
+        hiveConf.set("sentry.hive.provider.resource", policyFileLocation.getPath());
+        hiveConf.set("sentry.hive.testing.mode", "true");
+        hiveConf.set("sentry.hive.server", "server1");
+
+        hiveConf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
+        hiveConf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath());
+        hiveConf.set("fs.defaultFS", fsURI);
+        hiveConf.set("fs.default.name", fsURI);
+        hiveConf.set("hive.metastore.execute.setugi", "true");
+        hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=" + baseDir.getAbsolutePath() + "/metastore_db;create=true");
+        hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
+        hiveConf.set("javax.jdo.option.ConnectionUserName", "hive");
+        hiveConf.set("javax.jdo.option.ConnectionPassword", "hive");
+        hiveConf.set("datanucleus.autoCreateSchema", "true");
+        hiveConf.set("datanucleus.fixedDatastore", "false");
+        hiveConf.set("datanucleus.autoStartMechanism", "SchemaTable");
+        hmsPort = findPort();
+        System.out.println("\n\n HMS port : " + hmsPort + "\n\n");
+        hiveConf.set("hive.metastore.uris", "thrift://localhost:" + hmsPort);
+        hiveConf.set("hive.metastore.pre.event.listeners", "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
+        hiveConf.set("hive.metastore.event.listeners", "org.apache.sentry.binding.metastore.SentryMetastorePostEventListener");
+        hiveConf.set("hive.security.authorization.task.factory", "org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl");
+        hiveConf.set("hive.server2.session.hook", "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
+
+        HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
+        authzConf.addResource(hiveConf);
+        File confDir = assertCreateDir(new File(baseDir, "etc"));
+        File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
+        OutputStream out = new FileOutputStream(accessSite);
+        authzConf.set("fs.defaultFS", fsURI);
+        authzConf.writeXml(out);
+        out.close();
+
+        hiveConf.set("hive.sentry.conf.url", accessSite.getPath());
+        System.out.println("Sentry client file : " + accessSite.getPath());
+
+        File hiveSite = new File(confDir, "hive-site.xml");
+        hiveConf.set("hive.server2.enable.doAs", "false");
+        hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, accessSite.toURI().toURL()
+            .toExternalForm());
+        out = new FileOutputStream(hiveSite);
+        hiveConf.writeXml(out);
+        out.close();
+
+        Reflection.staticField("hiveSiteURL")
+        .ofType(URL.class)
+        .in(HiveConf.class)
+        .set(hiveSite.toURI().toURL());
+
+        metastore = new InternalMetastoreServer(hiveConf);
+        new Thread() {
+          @Override
+          public void run() {
+            try {
+              metastore.start();
+              // Keep the thread alive without busy-spinning if start() returns.
+              while (true) {
+                Thread.sleep(Long.MAX_VALUE);
+              }
+            } catch (Exception e) {
+              System.out.println("Could not start Hive Metastore");
+            }
+          }
+        }.start();
+
+        hiveServer2 = new InternalHiveServer(hiveConf);
+        new Thread() {
+          @Override
+          public void run() {
+            try {
+              hiveServer2.start();
+              // Keep the thread alive without busy-spinning if start() returns.
+              while (true) {
+                Thread.sleep(Long.MAX_VALUE);
+              }
+            } catch (Exception e) {
+              System.out.println("Could not start Hive Server");
+            }
+          }
+        }.start();
+
+        Thread.sleep(10000);
+        return null;
+      }
+    });
+  }
+
+  private void startDFSandYARN() throws IOException,
+      InterruptedException {
+    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
+        Configuration conf = new HdfsConfiguration();
+        conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
+            SentryAuthorizationProvider.class.getName());
+        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+        conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+        File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
+        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
+        conf.set("hadoop.security.group.mapping",
+            MiniDFS.PseudoGroupMappingService.class.getName());
+        Configuration.addDefaultResource("test.xml");
+
+        conf.set("sentry.authorization-provider.hdfs-path-prefixes", "/user/hive/warehouse,/tmp/external");
+        conf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
+        conf.set("sentry.authorization-provider.cache-stale-threshold.ms", "3000");
+
+        conf.set("sentry.hdfs.service.security.mode", "none");
+        conf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
+        conf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
+        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+        miniDFS = new MiniDFSCluster.Builder(conf).build();
+        Path tmpPath = new Path("/tmp");
+        Path hivePath = new Path("/user/hive");
+        Path warehousePath = new Path(hivePath, "warehouse");
+        miniDFS.getFileSystem().mkdirs(warehousePath);
+        boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
+        System.out.println("\n\n Is dir :" + directory + "\n\n");
+        System.out.println("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
+        fsURI = miniDFS.getFileSystem().getUri().toString();
+        conf.set("fs.defaultFS", fsURI);
+
+        // Create Yarn cluster
+        // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
+
+        miniDFS.getFileSystem().mkdirs(tmpPath);
+        miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
+        miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
+        miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
+        System.out.println("\n\n Owner :"
+            + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner()
+            + ", "
+            + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup()
+            + "\n\n");
+        System.out.println("\n\n Owner tmp :"
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", "
+            + "\n\n");
+
+        int dfsSafeCheckRetry = 30;
+        boolean hasStarted = false;
+        for (int i = dfsSafeCheckRetry; i > 0; i--) {
+          if (!miniDFS.getFileSystem().isInSafeMode()) {
+            hasStarted = true;
+            System.out.println("HDFS safemode check num times : " + (31 - i));
+            break;
+          }
+          // Give the NameNode time to leave safe mode instead of spinning.
+          Thread.sleep(RETRY_WAIT);
+        }
+        if (!hasStarted) {
+          throw new RuntimeException("HDFS hasn't exited safe mode yet");
+        }
+
+        return null;
+      }
+    });
+  }
+
+  private void startSentry() throws IOException,
+      InterruptedException {
+    hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        Configuration sentryConf = new Configuration(false);
+        Map<String, String> properties = Maps.newHashMap();
+        properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND,
+            SimpleDBProviderBackend.class.getName());
+        properties.put(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname,
+            SentryHiveAuthorizationTaskFactoryImpl.class.getName());
+        properties
+            .put(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS.varname, "2");
+        properties.put("hive.metastore.uris", "thrift://localhost:" + hmsPort);
+        properties.put(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE);
+//        properties.put("sentry.service.server.compact.transport", "true");
+        properties.put("sentry.hive.testing.mode", "true");
+        properties.put("sentry.service.reporting", "JMX");
+        properties.put(ServerConfig.ADMIN_GROUPS, "hive,admin");
+        properties.put(ServerConfig.RPC_ADDRESS, "localhost");
+        properties.put(ServerConfig.RPC_PORT, String.valueOf(sentryPort < 0 ? 0 : sentryPort));
+        properties.put(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false");
+
+        properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
+        properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath());
+        properties.put(ServerConfig.SENTRY_STORE_JDBC_URL,
+            "jdbc:derby:;databaseName=" + baseDir.getPath()
+                + "/sentrystore_db;create=true");
+        properties.put("sentry.service.processor.factories",
+            "org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessorFactory,org.apache.sentry.hdfs.SentryHDFSServiceProcessorFactory");
+        properties.put("sentry.policy.store.plugins", "org.apache.sentry.hdfs.SentryPlugin");
+        properties.put(ServerConfig.RPC_MIN_THREADS, "3");
+        for (Map.Entry<String, String> entry : properties.entrySet()) {
+          sentryConf.set(entry.getKey(), entry.getValue());
+        }
+        sentryService = new SentryServiceFactory().create(sentryConf);
+        properties.put(ClientConfig.SERVER_RPC_ADDRESS, sentryService.getAddress()
+            .getHostName());
+        sentryConf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryService.getAddress()
+            .getHostName());
+        properties.put(ClientConfig.SERVER_RPC_PORT,
+            String.valueOf(sentryService.getAddress().getPort()));
+        sentryConf.set(ClientConfig.SERVER_RPC_PORT,
+            String.valueOf(sentryService.getAddress().getPort()));
+        waitOnSentryService();
+        sentryPort = sentryService.getAddress().getPort();
+        System.out.println("\n\n Sentry port : " + sentryPort + "\n\n");
+        return null;
+      }
+    });
+  }
+
+  @After
+  public void cleanUp() throws Exception {
+    try {
+      if (miniDFS != null) {
+        miniDFS.shutdown();
+      }
+    } finally {
+      try {
+        if (hiveServer2 != null) {
+          hiveServer2.shutdown();
+        }
+      } finally {
+        if (metastore != null) {
+          metastore.shutdown();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testEnd2End() throws Throwable {
+
+    Connection conn = hiveServer2.createConnection("hive", "hive");
+    Statement stmt = conn.createStatement();
+    stmt.execute("create role admin_role");
+    stmt.execute("grant role admin_role to group hive");
+    stmt.execute("grant all on server server1 to role admin_role");
+    stmt.execute("create table p1 (s string) partitioned by (month int, day int)");
+    stmt.execute("alter table p1 add partition (month=1, day=1)");
+    stmt.execute("alter table p1 add partition (month=1, day=2)");
+    stmt.execute("alter table p1 add partition (month=2, day=1)");
+    stmt.execute("alter table p1 add partition (month=2, day=2)");
+
+    stmt.execute("create role p1_admin");
+    stmt.execute("grant role p1_admin to group hbase");
+
+    verifyOnAllSubDirs("/user/hive/warehouse/p1", null, "hbase", false);
+
+    loadData(stmt);
+
+    verifyHDFSandMR(stmt);
+
+    stmt.execute("revoke select on table p1 from role p1_admin");
+    verifyOnAllSubDirs("/user/hive/warehouse/p1", null, "hbase", false);
+
+    stmt.execute("grant all on table p1 to role p1_admin");
+    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.ALL, "hbase", true);
+
+    stmt.execute("revoke select on table p1 from role p1_admin");
+    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.WRITE_EXECUTE, "hbase", true);
+
+    // Verify table rename works
+    stmt.execute("alter table p1 rename to p3");
+    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);
+
+    stmt.execute("alter table p3 partition (month=1, day=1) rename to partition (month=1, day=3)");
+    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);
+    verifyOnAllSubDirs("/user/hive/warehouse/p3/month=1/day=3", FsAction.WRITE_EXECUTE, "hbase", true);
+
+    sentryService.stop();
+    // Verify that Sentry permissions are still enforced for the "stale" period
+    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);
+
+    // Verify that Sentry permissions are NOT enforced AFTER the "stale" period
+    verifyOnAllSubDirs("/user/hive/warehouse/p3", null, "hbase", false);
+
+    startSentry();
+    // Verify that permissions are re-enforced after the Sentry restart
+    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);
+
+    // Create new table and verify everything is fine after restart...
+    stmt.execute("create table p2 (s string) partitioned by (month int, day int)");
+    stmt.execute("alter table p2 add partition (month=1, day=1)");
+    stmt.execute("alter table p2 add partition (month=1, day=2)");
+    stmt.execute("alter table p2 add partition (month=2, day=1)");
+    stmt.execute("alter table p2 add partition (month=2, day=2)");
+
+    verifyOnAllSubDirs("/user/hive/warehouse/p2", null, "hbase", false);
+
+    stmt.execute("grant select on table p2 to role p1_admin");
+    verifyOnAllSubDirs("/user/hive/warehouse/p2", FsAction.READ_EXECUTE, "hbase", true);
+
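+    // Re-issue the same grant; the resulting ACLs should be unchanged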
+    stmt.execute("grant select on table p2 to role p1_admin");
+    verifyOnAllSubDirs("/user/hive/warehouse/p2", FsAction.READ_EXECUTE, "hbase", true);
+
+    // Create external table
+    writeToPath("/tmp/external/ext1", 5, "foo", "bar");
+
+    stmt.execute("create table ext1 (s string) location \'/tmp/external/ext1\'");
+    verifyQuery(stmt, "ext1", 5);
+
+    // Ensure existing group permissions are never returned..
+    verifyOnAllSubDirs("/tmp/external/ext1", null, "bar", false);
+    verifyOnAllSubDirs("/tmp/external/ext1", null, "hbase", false);
+
+    stmt.execute("grant all on table ext1 to role p1_admin");
+    verifyOnAllSubDirs("/tmp/external/ext1", FsAction.ALL, "hbase", true);
+
+    stmt.execute("revoke select on table ext1 from role p1_admin");
+    verifyOnAllSubDirs("/tmp/external/ext1", FsAction.WRITE_EXECUTE, "hbase", true);
+
+    // Verify database operations works correctly
+    stmt.execute("create database db1");
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db", null, "hbase", false);
+
+    stmt.execute("create table db1.tbl1 (s string)");
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", null, "hbase", false);
+    stmt.execute("create table db1.tbl2 (s string)");
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", null, "hbase", false);
+
+    // Verify db privileges are propagated to tables
+    stmt.execute("grant select on database db1 to role p1_admin");
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.READ_EXECUTE, "hbase", true);
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", FsAction.READ_EXECUTE, "hbase", true);
+
+    stmt.execute("use db1");
+    stmt.execute("grant all on table tbl1 to role p1_admin");
+
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.ALL, "hbase", true);
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", FsAction.READ_EXECUTE, "hbase", true);
+
+    // Verify recursive revoke
+    stmt.execute("revoke select on database db1 from role p1_admin");
+
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.WRITE_EXECUTE, "hbase", true);
+    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", null, "hbase", false);
+
+    // Verify cleanup..
+    stmt.execute("drop table tbl1");
+    Assert.assertFalse(miniDFS.getFileSystem().exists(new Path("/user/hive/warehouse/db1.db/tbl1")));
+
+    stmt.execute("drop table tbl2");
+    Assert.assertFalse(miniDFS.getFileSystem().exists(new Path("/user/hive/warehouse/db1.db/tbl2")));
+
+    stmt.execute("use default");
+    stmt.execute("drop database db1");
+    Assert.assertFalse(miniDFS.getFileSystem().exists(new Path("/user/hive/warehouse/db1.db")));
+
+    // START : Verify external table set location..
+    writeToPath("/tmp/external/tables/ext2_before/i=1", 5, "foo", "bar");
+    writeToPath("/tmp/external/tables/ext2_before/i=2", 5, "foo", "bar");
+
+    stmt.execute("create external table ext2 (s string) partitioned by (i int) location \'/tmp/external/tables/ext2_before\'");
+    stmt.execute("alter table ext2 add partition (i=1)");
+    stmt.execute("alter table ext2 add partition (i=2)");
+    verifyQuery(stmt, "ext2", 10);
+    verifyOnAllSubDirs("/tmp/external/tables/ext2_before", null, "hbase", false);
+    stmt.execute("grant all on table ext2 to role p1_admin");
+    verifyOnPath("/tmp/external/tables/ext2_before", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=1", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=2", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=1/stuff.txt", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=2/stuff.txt", FsAction.ALL, "hbase", true);
+
+    writeToPath("/tmp/external/tables/ext2_after/i=1", 6, "foo", "bar");
+    writeToPath("/tmp/external/tables/ext2_after/i=2", 6, "foo", "bar");
+
+    stmt.execute("alter table ext2 set location \'hdfs:///tmp/external/tables/ext2_after\'");
+    // Even though table location is altered, partition location is still old (still 10 rows)
+    verifyQuery(stmt, "ext2", 10);
+    // You have to explicitly alter partition location..
+    verifyOnPath("/tmp/external/tables/ext2_before", null, "hbase", false);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=1", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=2", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=1/stuff.txt", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=2/stuff.txt", FsAction.ALL, "hbase", true);
+
+    stmt.execute("alter table ext2 partition (i=1) set location \'hdfs:///tmp/external/tables/ext2_after/i=1\'");
+    stmt.execute("alter table ext2 partition (i=2) set location \'hdfs:///tmp/external/tables/ext2_after/i=2\'");
+    // Now that partition location is altered, it picks up new data (12 rows instead of 10)
+    verifyQuery(stmt, "ext2", 12);
+
+    verifyOnPath("/tmp/external/tables/ext2_before", null, "hbase", false);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=1", null, "hbase", false);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=2", null, "hbase", false);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=1/stuff.txt", null, "hbase", false);
+    verifyOnPath("/tmp/external/tables/ext2_before/i=2/stuff.txt", null, "hbase", false);
+    verifyOnPath("/tmp/external/tables/ext2_after", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_after/i=1", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_after/i=2", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_after/i=1/stuff.txt", FsAction.ALL, "hbase", true);
+    verifyOnPath("/tmp/external/tables/ext2_after/i=2/stuff.txt", FsAction.ALL, "hbase", true);
+    // END : Verify external table set location..
+
+    stmt.close();
+    conn.close();
+  }
+
+  private void verifyQuery(Statement stmt, String table, int n) throws Throwable {
+    verifyQuery(stmt, table, n, NUM_RETRIES);
+  }
+  
+  private void verifyQuery(Statement stmt, String table, int n, int retry) throws Throwable {
+    ResultSet rs = null;
+    try {
+      rs = stmt.executeQuery("select * from " + table);
+      int numRows = 0;
+      while (rs.next()) { numRows++; }
+      Assert.assertEquals(n, numRows);
+    } catch (Throwable th) {
+      if (retry > 0) {
+        Thread.sleep(RETRY_WAIT);
+        verifyQuery(stmt, table, n, retry - 1);
+      } else {
+        throw th;
+      }
+    } finally {
+      if (rs != null) {
+        rs.close();
+      }
+    }
+  }
+
+  private void loadData(Statement stmt) throws IOException, SQLException {
+    FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path("/tmp/f1.txt"));
+    f1.writeChars("m1d1_t1\n");
+    f1.writeChars("m1d1_t2\n");
+    f1.writeChars("m1d1_t3\n");
+    f1.flush();
+    f1.close();
+    stmt.execute("load data inpath \'/tmp/f1.txt\' overwrite into table p1 partition (month=1, day=1)");
+    FSDataOutputStream f2 = miniDFS.getFileSystem().create(new Path("/tmp/f2.txt"));
+    f2.writeChars("m2d2_t4\n");
+    f2.writeChars("m2d2_t5\n");
+    f2.writeChars("m2d2_t6\n");
+    f2.flush();
+    f2.close();
+    stmt.execute("load data inpath \'/tmp/f2.txt\' overwrite into table p1 partition (month=2, day=2)");
+    ResultSet rs = stmt.executeQuery("select * from p1");
+    List<String> vals = new ArrayList<String>(); 
+    while (rs.next()) {
+      vals.add(rs.getString(1));
+    }
+    Assert.assertEquals(6, vals.size());
+    rs.close();
+  }
+
+  private void writeToPath(String path, int numRows, String user, String group) throws IOException {
+    Path p = new Path(path);
+    miniDFS.getFileSystem().mkdirs(p);
+    miniDFS.getFileSystem().setOwner(p, user, group);
+//    miniDFS.getFileSystem().setPermission(p, FsPermission.valueOf("-rwxrwx---"));
+    FSDataOutputStream f1 = miniDFS.getFileSystem().create(new Path(path + "/stuff.txt"));
+    for (int i = 0; i < numRows; i++) {
+      f1.writeChars("random" + i + "\n");
+    }
+    f1.flush();
+    f1.close();
+    miniDFS.getFileSystem().setOwner(new Path(path + "/stuff.txt"), "asuresh", "supergroup");
+    miniDFS.getFileSystem().setPermission(new Path(path + "/stuff.txt"), FsPermission.valueOf("-rwxrwx---"));
+  }
+
+  private void verifyHDFSandMR(Statement stmt) throws Throwable {
+    // hbase user should not be allowed to read...
+    UserGroupInformation hbaseUgi = UserGroupInformation.createUserForTesting("hbase", new String[] {"hbase"});
+    hbaseUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        try {
+          miniDFS.getFileSystem().open(new Path("/user/hive/warehouse/p1/month=1/day=1/f1.txt"));
+          Assert.fail("Should not be allowed !!");
+        } catch (Exception e) {
+          Assert.assertTrue("Wrong Error : " + e.getMessage(), e.getMessage().contains("Permission denied: user=hbase"));
+        }
+        return null;
+      }
+    });
+
+    // WordCount should fail (disabled along with the mini MR cluster)
+    // runWordCount(new JobConf(miniMR.getConfig()), "/user/hive/warehouse/p1/month=1/day=1", "/tmp/wc_out");
+
+    stmt.execute("grant select on table p1 to role p1_admin");
+
+    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.READ_EXECUTE, "hbase", true);
+    // hbase user should now be allowed to read...
+    hbaseUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        Path p = new Path("/user/hive/warehouse/p1/month=2/day=2/f2.txt");
+        BufferedReader in = new BufferedReader(new InputStreamReader(miniDFS.getFileSystem().open(p)));
+        String line;
+        List<String> lines = new ArrayList<String>();
+        while ((line = in.readLine()) != null) {
+          lines.add(line);
+        }
+        Assert.assertEquals(3, lines.size());
+        in.close();
+        return null;
+      }
+    });
+
+  }
+
+  private void verifyOnAllSubDirs(String path, FsAction fsAction, String group, boolean groupShouldExist) throws Throwable {
+    verifyOnAllSubDirs(path, fsAction, group, groupShouldExist, true);
+  }
+
+  private void verifyOnPath(String path, FsAction fsAction, String group, boolean groupShouldExist) throws Throwable {
+    verifyOnAllSubDirs(path, fsAction, group, groupShouldExist, false);
+  }
+
+  private void verifyOnAllSubDirs(String path, FsAction fsAction, String group, boolean groupShouldExist, boolean recurse) throws Throwable {
+    verifyOnAllSubDirs(new Path(path), fsAction, group, groupShouldExist, recurse, NUM_RETRIES);
+  }
+
+  private void verifyOnAllSubDirs(Path p, FsAction fsAction, String group, boolean groupShouldExist, boolean recurse, int retry) throws Throwable {
+    FileStatus fStatus = null;
+    try {
+      fStatus = miniDFS.getFileSystem().getFileStatus(p);
+      if (groupShouldExist) {
+        Assert.assertEquals(fsAction, getAcls(p).get(group));
+      } else {
+        Assert.assertFalse(getAcls(p).containsKey(group));
+      }
+    } catch (Throwable th) {
+      if (retry > 0) {
+        Thread.sleep(RETRY_WAIT);
+        verifyOnAllSubDirs(p, fsAction, group, groupShouldExist, recurse, retry - 1);
+        return; // the retry has already verified this subtree, including recursion
+      } else {
+        throw th;
+      }
+    }
+    if (recurse) {
+      if (fStatus.isDirectory()) {
+        FileStatus[] children = miniDFS.getFileSystem().listStatus(p);
+        for (FileStatus fs : children) {
+          verifyOnAllSubDirs(fs.getPath(), fsAction, group, groupShouldExist, recurse, NUM_RETRIES);
+        }
+      }
+    }
+  }
+
+  private Map<String, FsAction> getAcls(Path path) throws Exception {
+    AclStatus aclStatus = miniDFS.getFileSystem().getAclStatus(path);
+    Map<String, FsAction> acls = new HashMap<String, FsAction>();
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        acls.put(ent.getName(), ent.getPermission());
+      }
+    }
+    return acls;
+  }
+
+  private void runWordCount(JobConf job, String inPath, String outPath) throws Exception {
+    Path in = new Path(inPath);
+    Path out = new Path(outPath);
+    miniDFS.getFileSystem().delete(out, true);
+    job.setJobName("TestWC");
+    JobClient jobClient = new JobClient(job);
+    RunningJob submittedJob = null;
+    FileInputFormat.setInputPaths(job, in);
+    FileOutputFormat.setOutputPath(job, out);
+    job.set("mapreduce.output.textoutputformat.separator", " ");
+    job.setInputFormat(TextInputFormat.class);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(LongWritable.class);
+    job.setOutputKeyClass(Text.class);
+    job.setOutputValueClass(LongWritable.class);
+    job.setMapperClass(WordCountMapper.class);
+    job.setReducerClass(SumReducer.class);
+    job.setOutputFormat(TextOutputFormat.class);
+    job.setNumReduceTasks(1);
+    job.setInt("mapreduce.map.maxattempts", 1);
+    job.setInt("mapreduce.reduce.maxattempts", 1);
+
+    submittedJob = jobClient.submitJob(job);
+    if (!jobClient.monitorAndPrintJob(job, submittedJob)) {
+      throw new IOException("job Failed !!");
+    }
+    
+  }
+}
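
The verify helpers in this test poll with a retry budget because ACL changes
propagate asynchronously from Sentry to the NameNode. A generic form of that
pattern, as a minimal sketch (the helper class and its names are illustrative,
not part of this patch):

    import java.util.concurrent.Callable;

    // Hypothetical helper mirroring the retry pattern of verifyQuery and
    // verifyOnAllSubDirs: re-run a check until it passes or retries run out.
    public final class RetryingAssert {
      public static void assertEventually(Callable<Void> check, int retries,
          long waitMs) throws Exception {
        for (int attempt = 0; ; attempt++) {
          try {
            check.call();          // throws (e.g. AssertionError) on failure
            return;                // check passed
          } catch (Throwable th) {
            if (attempt >= retries) {
              throw new AssertionError("Condition not met after retries", th);
            }
            Thread.sleep(waitMs);  // give the async permission sync time to catch up
          }
        }
      }
    }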


[4/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
new file mode 100644
index 0000000..5425daa
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import javax.security.auth.callback.CallbackHandler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client;
+import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
+import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
+import org.apache.thrift.protocol.TBinaryProtocol;
+//import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TMultiplexedProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+public class SentryHDFSServiceClient {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class);
+
+  public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService";
+
+  public static class SentryAuthzUpdate {
+
+    private final List<PermissionsUpdate> permUpdates;
+    private final List<PathsUpdate> pathUpdates;
+
+    public SentryAuthzUpdate(List<PermissionsUpdate> permUpdates, List<PathsUpdate> pathUpdates) {
+      this.permUpdates = permUpdates;
+      this.pathUpdates = pathUpdates;
+    }
+
+    public List<PermissionsUpdate> getPermUpdates() {
+      return permUpdates;
+    }
+
+    public List<PathsUpdate> getPathUpdates() {
+      return pathUpdates;
+    }
+  }
+  
+  /**
+   * This transport wraps the SASL transport to set up the right UGI context for open().
+   */
+  public static class UgiSaslClientTransport extends TSaslClientTransport {
+    protected UserGroupInformation ugi = null;
+
+    public UgiSaslClientTransport(String mechanism, String authorizationId,
+        String protocol, String serverName, Map<String, String> props,
+        CallbackHandler cbh, TTransport transport, boolean wrapUgi)
+        throws IOException {
+      super(mechanism, authorizationId, protocol, serverName, props, cbh,
+          transport);
+      if (wrapUgi) {
+        ugi = UserGroupInformation.getLoginUser();
+      }
+    }
+
+    // Open the SASL transport using the current UserGroupInformation.
+    // This is needed so that the current login context is stored and used by open().
+    @Override
+    public void open() throws TTransportException {
+      if (ugi == null) {
+        baseOpen();
+      } else {
+        try {
+          ugi.doAs(new PrivilegedExceptionAction<Void>() {
+            public Void run() throws TTransportException {
+              baseOpen();
+              return null;
+            }
+          });
+        } catch (IOException e) {
+          throw new TTransportException("Failed to open SASL transport", e);
+        } catch (InterruptedException e) {
+          throw new TTransportException(
+              "Interrupted while opening underlying transport", e);
+        }
+      }
+    }
+
+    private void baseOpen() throws TTransportException {
+      super.open();
+    }
+  }
+
+  private final Configuration conf;
+  private final InetSocketAddress serverAddress;
+  private final int connectionTimeout;
+  private boolean kerberos;
+  private TTransport transport;
+
+  private String[] serverPrincipalParts;
+  private Client client;
+  
+  public SentryHDFSServiceClient(Configuration conf) throws IOException {
+    this.conf = conf;
+    Preconditions.checkNotNull(this.conf, "Configuration object cannot be null");
+    this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull(
+                           conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key "
+                           + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt(
+                           ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT));
+    this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT,
+                                         ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT);
+    kerberos = ClientConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase(
+        conf.get(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS).trim());
+    transport = new TSocket(serverAddress.getHostName(),
+        serverAddress.getPort(), connectionTimeout);
+    if (kerberos) {
+      String serverPrincipal = Preconditions.checkNotNull(
+          conf.get(ClientConfig.PRINCIPAL), ClientConfig.PRINCIPAL + " is required");
+
+      // Resolve server host in the same way as we are doing on server side
+      serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress());
+      LOGGER.info("Using server kerberos principal: " + serverPrincipal);
+
+      serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal);
+      Preconditions.checkArgument(serverPrincipalParts.length == 3,
+           "Kerberos principal should have 3 parts: " + serverPrincipal);
+      boolean wrapUgi = "true".equalsIgnoreCase(conf
+          .get(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true"));
+      transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(),
+          null, serverPrincipalParts[0], serverPrincipalParts[1],
+          ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi);
+    } else {
+      serverPrincipalParts = null;
+    }
+    try {
+      transport.open();
+    } catch (TTransportException e) {
+      throw new IOException("Transport exception while opening transport: " + e.getMessage(), e);
+    }
+    LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress);
+    TProtocol tProtocol = new TBinaryProtocol(transport);
+//    if (conf.getBoolean(ClientConfig.USE_COMPACT_TRANSPORT,
+//        ClientConfig.USE_COMPACT_TRANSPORT_DEFAULT)) {
+//      tProtocol = new TCompactProtocol(transport);
+//    } else {
+//      tProtocol = new TBinaryProtocol(transport);
+//    }
+    TMultiplexedProtocol protocol = new TMultiplexedProtocol(
+      tProtocol, SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME);
+    client = new SentryHDFSService.Client(protocol);
+    LOGGER.info("Successfully created client");
+  }
+
+  public synchronized void notifyHMSUpdate(PathsUpdate update)
+      throws IOException {
+    try {
+      client.handle_hms_notification(update.toThrift());
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+  }
+
+  public synchronized long getLastSeenHMSPathSeqNum()
+      throws IOException {
+    try {
+      return client.check_hms_seq_num(-1);
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+  }
+
+  public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum)
+      throws IOException {
+    SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList<PermissionsUpdate>(), new LinkedList<PathsUpdate>());
+    try {
+      TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum);
+      if (sentryUpdates.getAuthzPathUpdate() != null) {
+        for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) {
+          retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate));
+        }
+      }
+      if (sentryUpdates.getAuthzPermUpdate() != null) {
+        for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) {
+          retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate));
+        }
+      }
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+    return retVal;
+  }
+
+  public void close() {
+    if (transport != null) {
+      transport.close();
+    }
+  }
+}
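
A minimal usage sketch for this client, assuming a Sentry service is reachable
on localhost with security disabled (the host, port, and starting sequence
numbers are illustrative):

    Configuration conf = new Configuration(false);
    conf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
    conf.setInt("sentry.hdfs.service.client.server.rpc-port", 8038);
    conf.set("sentry.hdfs.service.security.mode", "none");

    SentryHDFSServiceClient client = new SentryHDFSServiceClient(conf);
    try {
      // Ask for everything newer than the locally applied sequence numbers
      SentryHDFSServiceClient.SentryAuthzUpdate updates =
          client.getAllUpdatesFrom(0L, 0L);
      for (PathsUpdate p : updates.getPathUpdates()) {
        System.out.println("paths seqNum=" + p.getSeqNum());
      }
      for (PermissionsUpdate perm : updates.getPermUpdates()) {
        System.out.println("perms seqNum=" + perm.getSeqNum());
      }
    } finally {
      client.close();
    }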

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
new file mode 100644
index 0000000..64cb943
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.sasl.Sasl;
+
+import com.google.common.collect.ImmutableMap;
+
+public class ServiceConstants {
+
+  private static final ImmutableMap<String, String> SASL_PROPERTIES;
+
+  static {
+    Map<String, String> saslProps = new HashMap<String, String>();
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    saslProps.put(Sasl.QOP, "auth-conf");
+    SASL_PROPERTIES = ImmutableMap.copyOf(saslProps);
+  }
+
+  public static class ServerConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+    /**
+     * This configuration parameter is only meant to be used for testing purposes.
+     */
+    public static final String SENTRY_HDFS_INTEGRATION_PATH_PREFIXES = "sentry.hdfs.integration.path.prefixes";
+    public static final String[] SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT =
+        new String[]{"/user/hive/warehouse"};
+    public static final String SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_MS = "sentry.hdfs.init.update.retry.delay.ms";
+    public static final int SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_DEFAULT = 10000;
+
+  }
+  public static class ClientConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+
+    public static final String SECURITY_MODE = "sentry.hdfs.service.security.mode";
+    public static final String SECURITY_MODE_KERBEROS = "kerberos";
+    public static final String SECURITY_MODE_NONE = "none";
+    public static final String SECURITY_USE_UGI_TRANSPORT = "sentry.hdfs.service.security.use.ugi";
+    public static final String PRINCIPAL = "sentry.hdfs.service.server.principal";
+
+    public static final String SERVER_RPC_PORT = "sentry.hdfs.service.client.server.rpc-port";
+    public static final int SERVER_RPC_PORT_DEFAULT = 8038;
+
+    public static final String SERVER_RPC_ADDRESS = "sentry.hdfs.service.client.server.rpc-address";
+
+    public static final String SERVER_RPC_CONN_TIMEOUT = "sentry.hdfs.service.client.server.rpc-connection-timeout";
+    public static final int SERVER_RPC_CONN_TIMEOUT_DEFAULT = 200000;
+    public static final String USE_COMPACT_TRANSPORT = "sentry.hdfs.service.client.compact.transport";
+    public static final boolean USE_COMPACT_TRANSPORT_DEFAULT = false;
+  }
+
+}
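
These keys can also be set programmatically in tests; a short sketch (the
values are illustrative, and it is an assumption here that the server reads
the prefix key as a comma-separated list):

    Configuration conf = new Configuration(false);
    conf.setStrings("sentry.hdfs.integration.path.prefixes",
        "/user/hive/warehouse", "/data/external");
    conf.set("sentry.hdfs.service.client.server.rpc-address", "sentry-host.example.com");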

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java
new file mode 100644
index 0000000..ba932ac
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.concurrent.locks.ReadWriteLock;
+
+public interface Updateable<K extends Updateable.Update> {
+
+  /**
+   * Thrift currently does not support class inheritance. We need all update
+   * objects to expose a unified API, so as a workaround, a wrapper class
+   * needs to be created that implements this interface and contains the
+   * generated Thrift class.
+   */
+  public interface Update {
+
+    boolean hasFullImage();
+    
+    long getSeqNum();
+
+    void setSeqNum(long seqNum);
+
+  }
+
+  /**
+   * Apply multiple partial updates in order.
+   * @param update the partial updates to apply, in sequence order
+   * @param lock external lock guarding the underlying data structure
+   */
+  public void updatePartial(Iterable<K> update, ReadWriteLock lock);
+
+  /**
+   * Returns a new object with the given full-image update applied.
+   * @param update a full-image update
+   * @return a new instance built from the full image
+   */
+  public Updateable<K> updateFull(K update);
+
+  /**
+   * Return the sequence number of the last applied update.
+   */
+  public long getLastUpdatedSeqNum();
+
+  /**
+   * Create a full-image update of the local data structure.
+   * @param currSeqNum the sequence number to stamp on the update
+   * @return a full-image update representing the current state
+   */
+  public K createFullImageUpdate(long currSeqNum);
+
+}
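
A toy implementation may make the contract concrete. This hypothetical
UpdateableSet (not part of this patch) tracks a set of strings, applies deltas
through updatePartial, and swaps in a fresh instance on a full image:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.locks.ReadWriteLock;

    class SetUpdate implements Updateable.Update {
      private long seqNum;
      private final boolean fullImage;
      final Set<String> values = new HashSet<String>();
      SetUpdate(long seqNum, boolean fullImage) {
        this.seqNum = seqNum;
        this.fullImage = fullImage;
      }
      public boolean hasFullImage() { return fullImage; }
      public long getSeqNum() { return seqNum; }
      public void setSeqNum(long seqNum) { this.seqNum = seqNum; }
    }

    class UpdateableSet implements Updateable<SetUpdate> {
      private final Set<String> state = new HashSet<String>();
      private final AtomicLong seqNum = new AtomicLong(0);

      public void updatePartial(Iterable<SetUpdate> updates, ReadWriteLock lock) {
        lock.writeLock().lock();
        try {
          for (SetUpdate u : updates) {
            state.addAll(u.values);     // apply the delta
            seqNum.set(u.getSeqNum());  // remember how far we have applied
          }
        } finally {
          lock.writeLock().unlock();
        }
      }

      public Updateable<SetUpdate> updateFull(SetUpdate update) {
        UpdateableSet fresh = new UpdateableSet(); // full image replaces local state
        fresh.state.addAll(update.values);
        fresh.seqNum.set(update.getSeqNum());
        return fresh;
      }

      public long getLastUpdatedSeqNum() { return seqNum.get(); }

      public SetUpdate createFullImageUpdate(long currSeqNum) {
        SetUpdate img = new SetUpdate(currSeqNum, true);
        img.values.addAll(state);
        return img;
      }
    }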

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java
new file mode 100644
index 0000000..03b288b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.apache.sentry.hdfs.service.thrift.TPathsDump;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class UpdateableAuthzPaths implements AuthzPaths, Updateable<PathsUpdate> {
+  private static final int MAX_UPDATES_PER_LOCK_USE = 99;
+  private volatile HMSPaths paths;
+  private final AtomicLong seqNum = new AtomicLong(0);
+
+  private static Logger LOG = LoggerFactory.getLogger(UpdateableAuthzPaths.class);
+  
+  public UpdateableAuthzPaths(String[] pathPrefixes) {
+    this.paths = new HMSPaths(pathPrefixes);
+  }
+
+  UpdateableAuthzPaths(HMSPaths paths) {
+    this.paths = paths;
+  }
+
+  @Override
+  public boolean isUnderPrefix(String[] pathElements) {
+    return paths.isUnderPrefix(pathElements);
+  }
+
+  @Override
+  public String findAuthzObject(String[] pathElements) {
+    return  paths.findAuthzObject(pathElements);
+  }
+
+  @Override
+  public String findAuthzObjectExactMatch(String[] pathElements) {
+    return  paths.findAuthzObjectExactMatch(pathElements);
+  }
+
+  @Override
+  public UpdateableAuthzPaths updateFull(PathsUpdate update) {
+    UpdateableAuthzPaths other = getPathsDump().initializeFromDump(
+        update.toThrift().getPathsDump());
+    other.seqNum.set(update.getSeqNum());
+    return other;
+  }
+
+  @Override
+  public void updatePartial(Iterable<PathsUpdate> updates, ReadWriteLock lock) {
+    lock.writeLock().lock();
+    try {
+      int counter = 0;
+      for (PathsUpdate update : updates) {
+        applyPartialUpdate(update);
+        if (++counter > MAX_UPDATES_PER_LOCK_USE) {
+          counter = 0;
+          lock.writeLock().unlock();
+          lock.writeLock().lock();
+        }
+        seqNum.set(update.getSeqNum());
+        LOG.debug("##### Updated paths seq Num [" + seqNum.get() + "]");
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  private void applyPartialUpdate(PathsUpdate update) {
+    // Handle alter table rename: the update will have exactly 2 path changes,
+    // one an add path and the other a del path
+    if (update.getPathChanges().size() == 2) {
+      List<TPathChanges> pathChanges = update.getPathChanges();
+      TPathChanges newPathInfo = null;
+      TPathChanges oldPathInfo = null;
+      if ((pathChanges.get(0).getAddPathsSize() == 1)
+        && (pathChanges.get(1).getDelPathsSize() == 1)) {
+        newPathInfo = pathChanges.get(0);
+        oldPathInfo = pathChanges.get(1);
+      } else if ((pathChanges.get(1).getAddPathsSize() == 1)
+          && (pathChanges.get(0).getDelPathsSize() == 1)) {
+        newPathInfo = pathChanges.get(1);
+        oldPathInfo = pathChanges.get(0);
+      }
+      if ((newPathInfo != null)&&(oldPathInfo != null)) {
+        paths.renameAuthzObject(
+            oldPathInfo.getAuthzObj(), oldPathInfo.getDelPaths().get(0),
+            newPathInfo.getAuthzObj(), newPathInfo.getAddPaths().get(0));
+        return;
+      }
+    }
+    for (TPathChanges pathChanges : update.getPathChanges()) {
+      paths.addPathsToAuthzObject(pathChanges.getAuthzObj(), pathChanges
+          .getAddPaths(), true);
+      List<List<String>> delPaths = pathChanges.getDelPaths();
+      if ((delPaths.size() == 1) && (delPaths.get(0).size() == 1)
+          && (delPaths.get(0).get(0).equals(PathsUpdate.ALL_PATHS))) {
+        // Remove all paths, e.g. drop table
+        paths.deleteAuthzObject(pathChanges.getAuthzObj());
+      } else {
+        paths.deletePathsFromAuthzObject(pathChanges.getAuthzObj(), pathChanges
+            .getDelPaths());
+      }
+    }
+  }
+
+  @Override
+  public long getLastUpdatedSeqNum() {
+    return seqNum.get();
+  }
+
+  @Override
+  public PathsUpdate createFullImageUpdate(long currSeqNum) {
+    PathsUpdate pathsUpdate = new PathsUpdate(currSeqNum, true);
+    pathsUpdate.toThrift().setPathsDump(getPathsDump().createPathsDump());
+    return pathsUpdate;
+  }
+
+  @Override
+  public AuthzPathsDumper<UpdateableAuthzPaths> getPathsDump() {
+    return new AuthzPathsDumper<UpdateableAuthzPaths>() {
+
+      @Override
+      public TPathsDump createPathsDump() {
+        return UpdateableAuthzPaths.this.paths.getPathsDump().createPathsDump();
+      }
+
+      @Override
+      public UpdateableAuthzPaths initializeFromDump(TPathsDump pathsDump) {
+        return new UpdateableAuthzPaths(UpdateableAuthzPaths.this.paths
+            .getPathsDump().initializeFromDump(pathsDump));
+      }
+    };
+  }
+}
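
The lock juggling in updatePartial exists so that a long batch of updates does
not starve readers: after every MAX_UPDATES_PER_LOCK_USE changes, the write
lock is briefly released, letting queued readers run. A stripped-down sketch of
the same pattern (the Runnable list stands in for the real updates):

    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    void applyInChunks(List<Runnable> changes, ReentrantReadWriteLock lock) {
      final int maxPerLockUse = 99; // mirrors MAX_UPDATES_PER_LOCK_USE
      lock.writeLock().lock();
      try {
        int counter = 0;
        for (Runnable change : changes) {
          change.run();                // mutate the guarded structure
          if (++counter > maxPerLockUse) {
            counter = 0;
            lock.writeLock().unlock(); // waiting readers may slip in here
            lock.writeLock().lock();
          }
        }
      } finally {
        lock.writeLock().unlock();
      }
    }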

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
new file mode 100644
index 0000000..fb60855
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
@@ -0,0 +1,87 @@
+#!/usr/local/bin/thrift -java
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#
+# Thrift service used to synchronize HMS path and Sentry permission updates with HDFS
+#
+
+include "share/fb303/if/fb303.thrift"
+
+namespace java org.apache.sentry.hdfs.service.thrift
+namespace php sentry.hdfs.thrift
+namespace cpp Apache.Sentry.HDFS.Thrift
+
+struct TPathChanges {
+1: required string authzObj;
+2: required list<list<string>> addPaths;
+3: required list<list<string>> delPaths;
+}
+
+struct TPathEntry {
+1: required byte type;
+2: required string pathElement;
+3: optional string authzObj;
+4: required set<i32> children;
+}
+
+struct TPathsDump {
+1: required i32 rootId;
+2: required map<i32,TPathEntry> nodeMap;
+}
+
+struct TPathsUpdate {
+1: required bool hasFullImage;
+2: optional TPathsDump pathsDump;
+3: required i64 seqNum;
+4: required list<TPathChanges> pathChanges;
+}
+
+struct TPrivilegeChanges {
+1: required string authzObj;
+2: required map<string, string> addPrivileges;
+3: required map<string, string> delPrivileges;
+}
+
+struct TRoleChanges {
+1: required string role;
+2: required list<string> addGroups;
+3: required list<string> delGroups;
+}
+
+struct TPermissionsUpdate {
+1: required bool hasfullImage;
+2: required i64 seqNum;
+3: required map<string, TPrivilegeChanges> privilegeChanges;
+4: required map<string, TRoleChanges> roleChanges; 
+}
+
+struct TAuthzUpdateResponse {
+1: optional list<TPathsUpdate> authzPathUpdate,
+2: optional list<TPermissionsUpdate> authzPermUpdate,
+}
+
+service SentryHDFSService
+{
+  # HMS Path cache
+  void handle_hms_notification(1:TPathsUpdate pathsUpdate);
+  i64 check_hms_seq_num(1:i64 pathSeqNum);
+  TAuthzUpdateResponse get_all_authz_updates_from(1:i64 permSeqNum, 2:i64 pathSeqNum);
+  map<string, list<string>> get_all_related_paths(1:string path, 2:bool exactMatch);
+}
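
Thrift generates Java classes for these structs with all-required-field
constructors. A sketch of assembling a partial paths delta (the object name,
paths, and sequence number are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.sentry.hdfs.service.thrift.TPathChanges;
    import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;

    // Paths travel as lists of path elements, e.g. /user/hive/warehouse/db1.db/tbl1
    // is represented as ["user", "hive", "warehouse", "db1.db", "tbl1"].
    List<List<String>> addPaths = new ArrayList<List<String>>();
    addPaths.add(Arrays.asList("user", "hive", "warehouse", "db1.db", "tbl1", "month=1"));
    List<List<String>> delPaths = new ArrayList<List<String>>();

    TPathChanges change = new TPathChanges("db1.tbl1", addPaths, delPaths);
    // A partial update: no full image, stamped with a sequence number
    TPathsUpdate update = new TPathsUpdate(false, 42L, Arrays.asList(change));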

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java
new file mode 100644
index 0000000..29868ae
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java
@@ -0,0 +1,357 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestHMSPaths {
+
+  @Test
+  public void testGetPathElements() {
+    List<String> as2 = HMSPaths.getPathElements(new String("/a/b"));
+    List<String> as1 = HMSPaths.getPathElements(new String("/a/b"));
+    Assert.assertEquals(as1, as2);
+
+    List<String> as = HMSPaths.getPathElements(new String("/a/b"));
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements(new String("//a/b"));
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements(new String("/a//b"));
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements(new String("/a/b/"));
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements(new String("//a//b//"));
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+  }
+
+  @Test
+  public void testEntryType() {
+    Assert.assertTrue(HMSPaths.EntryType.DIR.isRemoveIfDangling());
+    Assert.assertFalse(HMSPaths.EntryType.PREFIX.isRemoveIfDangling());
+    Assert.assertFalse(
+        HMSPaths.EntryType.AUTHZ_OBJECT.isRemoveIfDangling());
+  }
+  
+  @Test
+  public void testRootEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    root.toString();
+    Assert.assertNull(root.getParent());
+    Assert.assertEquals(HMSPaths.EntryType.DIR, root.getType());
+    Assert.assertNull(root.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR, root.getFullPath());
+    Assert.assertTrue(root.getChildren().isEmpty());
+    root.delete();
+    try {
+      root.find(null, true);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    try {
+      root.find(new String[0], true);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    try {
+      root.find(null, false);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    try {
+      root.find(new String[0], false);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    Assert.assertNull(root.find(new String[]{"a"}, true));
+    Assert.assertNull(root.find(new String[]{"a"}, false));
+    Assert.assertNull(root.findPrefixEntry(Lists.newArrayList("a")));
+
+    root.delete();
+  }
+
+  @Test
+  public void testRootPrefixEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(true);
+    root.toString();
+
+    Assert.assertNull(root.find(new String[]{"a"}, true));
+    Assert.assertNull(root.find(new String[]{"a"}, false));
+    Assert.assertEquals(root, root.findPrefixEntry(Lists.newArrayList("a")));
+    Assert.assertEquals(root, root.findPrefixEntry(Lists.newArrayList("a", "b")));
+
+    try {
+      root.createPrefix(Lists.newArrayList("a"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+  }
+
+  @Test
+  public void testImmediatePrefixEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry entry = root.createPrefix(Lists.newArrayList("a"));
+    entry.toString();
+    
+    Assert.assertEquals(1, root.getChildren().size());
+
+    Assert.assertEquals(root, entry.getParent());
+    Assert.assertEquals(HMSPaths.EntryType.PREFIX, entry.getType());
+    Assert.assertEquals("a", entry.getPathElement());
+    Assert.assertNull(entry.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a", entry.getFullPath());
+    Assert.assertTrue(entry.getChildren().isEmpty());
+
+    Assert.assertEquals(entry, root.findPrefixEntry(Lists.newArrayList("a")));
+    Assert.assertEquals(entry, root.findPrefixEntry(Lists.newArrayList("a", "b")));
+
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+
+    Assert.assertNull(root.find(new String[]{"b"}, false));
+    Assert.assertNull(root.findPrefixEntry(Lists.newArrayList("b")));
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    entry.delete();
+    Assert.assertTrue(root.getChildren().isEmpty());
+  }
+
+  @Test
+  public void testFurtherPrefixEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry entry = root.createPrefix(Lists.newArrayList("a", "b"));
+    entry.toString();
+
+    Assert.assertEquals(1, root.getChildren().size());
+
+    Assert.assertEquals(root, entry.getParent().getParent());
+    Assert.assertEquals(HMSPaths.EntryType.PREFIX, entry.getType());
+    Assert.assertEquals(HMSPaths.EntryType.DIR, 
+        entry.getParent().getType());
+    Assert.assertEquals("b", entry.getPathElement());
+    Assert.assertEquals("a", entry.getParent().getPathElement());
+    Assert.assertNull(entry.getAuthzObj());
+    Assert.assertNull(entry.getParent().getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b", 
+        entry.getFullPath());
+    Assert.assertEquals(Path.SEPARATOR + "a", entry.getParent().getFullPath());
+    Assert.assertTrue(entry.getChildren().isEmpty());
+    Assert.assertEquals(1, entry.getParent().getChildren().size());
+
+    Assert.assertEquals(entry, root.findPrefixEntry(Lists.newArrayList("a", "b")));
+    Assert.assertNull(root.findPrefixEntry(Lists.newArrayList("a")));
+
+    Assert.assertNull(root.find(new String[]{"a", "b", "c"}, false));
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    entry.delete();
+    Assert.assertTrue(root.getChildren().isEmpty());
+  }
+
+  @Test
+  public void testImmediateAuthzEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b"));
+
+    HMSPaths.Entry entry = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "p1"), "A");
+    Assert.assertEquals(prefix, entry.getParent());
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType());
+    Assert.assertEquals("p1", entry.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b" +
+        Path.SEPARATOR + "p1", entry.getFullPath());
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "p1", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "p1"}, true));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "p1"}, false));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "p1", "c"}, 
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "p1", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "p1")));
+
+    root.find(new String[]{"a", "b", "p1"}, true).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "p1"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "p1")));
+
+  }
+
+  @Test
+  public void testFurtherAuthzEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b"));
+
+    HMSPaths.Entry entry = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p1"), "A");
+    Assert.assertEquals(prefix, entry.getParent().getParent());
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType());
+    Assert.assertEquals("p1", entry.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b" +
+        Path.SEPARATOR + "t" + Path.SEPARATOR + "p1", entry.getFullPath());
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "p1", "t", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    HMSPaths.Entry ep2 = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p1", "p2"), "A");
+
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType());
+    Assert.assertEquals("p1", entry.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, ep2.getType());
+    Assert.assertEquals("p2", ep2.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1"},
+        true));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1"},
+        false));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+    Assert.assertEquals(ep2, root.find(new String[]{"a", "b", "t", "p1", "p2"},
+        true));
+    Assert.assertEquals(ep2, root.find(new String[]{"a", "b", "t", "p1", "p2"},
+        false));
+    Assert.assertEquals(ep2, root.find(new String[]{"a", "b", "t", "p1", "p2", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1", "p2", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1", "p2")));
+
+    root.find(new String[]{"a", "b", "t", "p1"}, false).delete();
+
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"},
+        true));
+    Assert.assertEquals(HMSPaths.EntryType.DIR, entry.getType());
+    Assert.assertNull(entry.getAuthzObj());
+
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+    Assert.assertNotNull(root.find(new String[]{"a", "b", "t", "p1", "p2"}, false));
+    root.find(new String[]{"a", "b", "t", "p1", "p2"}, false).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+  }
+
+  @Test
+  public void testMultipleAuthzEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b"));
+
+    HMSPaths.Entry e1 = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p1"), "A");
+    HMSPaths.Entry e2 = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p2"), "A");
+
+
+    Assert.assertEquals(e1, root.find(new String[]{"a", "b", "t", "p1"}, true));
+    Assert.assertEquals(e1, root.find(new String[]{"a", "b", "t", "p1"}, 
+        false));
+    Assert.assertEquals(e1, root.find(new String[]{"a", "b", "t", "p1", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+    Assert.assertEquals(e2, root.find(new String[]{"a", "b", "t", "p2"}, true));
+    Assert.assertEquals(e2, root.find(new String[]{"a", "b", "t", "p2"}, 
+        false));
+    Assert.assertEquals(e2, root.find(new String[]{"a", "b", "t", "p2", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p2", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p2")));
+
+    root.find(new String[]{"a", "b", "t", "p1"}, true).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false));
+
+    root.find(new String[]{"a", "b", "t", "p2"}, true).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p2"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false));
+
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p3")));
+  }
+  
+}
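
The boolean argument to Entry.find() in the assertions above decides whether an ancestor AUTHZ_OBJECT entry may answer for a longer child path. A minimal sketch of that contract, assuming only the HMSPaths.Entry API these tests exercise:

  HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
  root.createPrefix(Lists.newArrayList("a", "b"));
  HMSPaths.Entry e = root.createAuthzObjPath(
      Lists.newArrayList("a", "b", "t", "p1"), "A");

  root.find(new String[]{"a", "b", "t", "p1"}, true);        // e (exact match)
  root.find(new String[]{"a", "b", "t", "p1"}, false);       // e (exact match)
  root.find(new String[]{"a", "b", "t", "p1", "c"}, true);   // e (partial match allowed)
  root.find(new String[]{"a", "b", "t", "p1", "c"}, false);  // null (exact match required)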

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java
new file mode 100644
index 0000000..2dfe73c
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import junit.framework.Assert;
+
+import org.apache.sentry.hdfs.service.thrift.TPathsDump;
+import org.apache.thrift.TDeserializer;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocolFactory;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestHMSPathsFullDump {
+  
+  private static boolean useCompact = true;
+
+  @Test
+  public void testDumpAndInitialize() {
+    HMSPaths hmsPaths = new HMSPaths(new String[] {"/user/hive/warehouse", "/user/hive/w2"});
+    hmsPaths._addAuthzObject("db1", Lists.newArrayList("/user/hive/warehouse/db1"));
+    hmsPaths._addAuthzObject("db1.tbl11", Lists.newArrayList("/user/hive/warehouse/db1/tbl11"));
+    hmsPaths._addPathsToAuthzObject("db1.tbl11", Lists.newArrayList(
+        "/user/hive/warehouse/db1/tbl11/part111",
+        "/user/hive/warehouse/db1/tbl11/part112",
+        "/user/hive/warehouse/db1/tbl11/p1=1/p2=x"));
+
+    // Not in prefix paths
+    hmsPaths._addAuthzObject("db2", Lists.newArrayList("/user/hive/w2/db2"));
+    hmsPaths._addAuthzObject("db2.tbl21", Lists.newArrayList("/user/hive/w2/db2/tbl21"));
+    hmsPaths._addPathsToAuthzObject("db2.tbl21", Lists.newArrayList("/user/hive/w2/db2/tbl21/p1=1/p2=x"));
+
+    Assert.assertEquals("db1", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false));
+
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1", "p2=x"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1"}, true));
+    Assert.assertEquals("db2.tbl21", hmsPaths.findAuthzObject(new String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true));
+
+    HMSPathsDumper serDe = hmsPaths.getPathsDump();
+    TPathsDump pathsDump = serDe.createPathsDump();
+    HMSPaths hmsPaths2 = new HMSPaths(new String[] {"/user/hive/warehouse"}).getPathsDump().initializeFromDump(pathsDump);
+
+    Assert.assertEquals("db1", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false));
+
+    // This path is not under the prefix, so it should not be deserialized.
+    Assert.assertNull(hmsPaths2.findAuthzObject(new String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true));
+  }
+
+  @Test
+  public void testThriftSerialization() throws TException {
+    HMSPaths hmsPaths = new HMSPaths(new String[] {"/"});
+    String prefix = "/user/hive/warehouse/";
+    for (int dbNum = 0; dbNum < 10; dbNum++) {
+      String dbName = "db" + dbNum;
+      hmsPaths._addAuthzObject(dbName, Lists.newArrayList(prefix + dbName));
+      for (int tblNum = 0; tblNum < 1000; tblNum++) {
+        String tblName = "tbl" + tblNum;
+        hmsPaths._addAuthzObject(dbName + "." + tblName, Lists.newArrayList(prefix + dbName + "/" + tblName));
+        for (int partNum = 0; partNum < 100; partNum++) {
+          String partName = "part" + partNum;
+          hmsPaths
+              ._addPathsToAuthzObject(
+                  dbName + "." + tblName,
+                  Lists.newArrayList(prefix + dbName + "/" + tblName + "/"
+                      + partName));
+        }
+      }
+    }
+    HMSPathsDumper serDe = hmsPaths.getPathsDump();
+    long t1 = System.currentTimeMillis();
+    TPathsDump pathsDump = serDe.createPathsDump();
+    
+    TProtocolFactory protoFactory = useCompact ? new TCompactProtocol.Factory() : new TBinaryProtocol.Factory(); 
+    byte[] ser = new TSerializer(protoFactory).serialize(pathsDump);
+    long serTime = System.currentTimeMillis() - t1;
+    System.out.println("Serialization Time: " + serTime + ", " + ser.length);
+
+    t1 = System.currentTimeMillis();
+    TPathsDump tPathsDump = new TPathsDump();
+    new TDeserializer(protoFactory).deserialize(tPathsDump, ser);
+    HMSPaths fromDump = serDe.initializeFromDump(tPathsDump);
+    System.out.println("Deserialization Time: " + (System.currentTimeMillis() - t1));
+    Assert.assertEquals("db9.tbl999", fromDump.findAuthzObject(new String[]{"user", "hive", "warehouse", "db9", "tbl999"}, false));
+    Assert.assertEquals("db9.tbl999", fromDump.findAuthzObject(new String[]{"user", "hive", "warehouse", "db9", "tbl999", "part99"}, false));
+  }
+
+}
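
The serialization test above doubles as a rough micro-benchmark; stripped of the timing code, the Thrift round trip it performs is just the following (a minimal sketch, using the same TSerializer/TDeserializer calls as the test):

  TProtocolFactory proto = new TCompactProtocol.Factory();      // or TBinaryProtocol.Factory()
  byte[] bytes = new TSerializer(proto).serialize(pathsDump);   // TPathsDump -> byte[]
  TPathsDump copy = new TPathsDump();
  new TDeserializer(proto).deserialize(copy, bytes);            // byte[] -> TPathsDump

The useCompact flag switches between the compact and binary protocols; the compact encoding generally yields a smaller payload, which matters here because a full paths dump covers every database, table and partition path.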

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
new file mode 100644
index 0000000..80b765a
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestUpdateableAuthzPaths {
+
+  @Test
+  public void testFullUpdate() {
+    HMSPaths hmsPaths = createBaseHMSPaths(1, 1);
+    assertEquals("db1", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1"}));
+    assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"}));
+
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths);
+    PathsUpdate update = new PathsUpdate(1, true);
+    update.toThrift().setPathsDump(authzPaths.getPathsDump().createPathsDump());
+
+    UpdateableAuthzPaths authzPaths2 = new UpdateableAuthzPaths(new String[] {"/"});
+    UpdateableAuthzPaths pre = authzPaths2.updateFull(update);
+    assertFalse(pre == authzPaths2);
+    authzPaths2 = pre;
+
+    assertEquals("db1", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1"}));
+    assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"}));
+
+    // Ensure a full update replaces the old state entirely
+    UpdateableAuthzPaths authzPaths3 = new UpdateableAuthzPaths(createBaseHMSPaths(2, 1));
+    update = new PathsUpdate(2, true);
+    update.toThrift().setPathsDump(authzPaths3.getPathsDump().createPathsDump());
+    pre = authzPaths2.updateFull(update);
+    assertFalse(pre == authzPaths2);
+    authzPaths2 = pre;
+
+    assertNull(authzPaths2.findAuthzObjectExactMatch(new String[]{"db1"}));
+    assertNull(authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"}));
+
+    assertEquals("db2", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2"}));
+    assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2", "tbl21"}));
+    assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2", "tbl21", "part211"}));
+    assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new String[]{"db2", "tbl21", "part212"}));
+  }
+
+  @Test
+  public void testPartialUpdateAddPath() {
+    HMSPaths hmsPaths = createBaseHMSPaths(1, 1);
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths);
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    // Create table
+    PathsUpdate update = new PathsUpdate(2, false);
+    TPathChanges pathChange = update.newPathChange("db1.tbl12");
+    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+    
+    // Add partition
+    update = new PathsUpdate(3, false);
+    pathChange = update.newPathChange("db1.tbl12");
+    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12/part121"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+
+    // Ensure no change in existing Paths
+    assertEquals("db1", authzPaths.findAuthzObjectExactMatch(new String[]{"db1"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"}));
+
+    // Verify new Paths
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12"}));
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12", "part121"}));
+
+    // Rename table
+    update = new PathsUpdate(4, false);
+    update.newPathChange("db1.xtbl11").addToAddPaths(PathsUpdate.cleanPath("file:///db1/xtbl11"));
+    update.newPathChange("db1.tbl11").addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+
+    // Verify name change
+    assertEquals("db1", authzPaths.findAuthzObjectExactMatch(new String[]{"db1"}));
+    assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl11"}));
+    // An explicit set-location has to be done on each partition, else it
+    // stays associated with the old table location
+    assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"}));
+    // Verify other tables are not touched
+    assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl12"}));
+    assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl12", "part121"}));
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12"}));
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12", "part121"}));
+
+  }
+
+  @Test
+  public void testPartialUpdateDelPath() {
+    HMSPaths hmsPaths = createBaseHMSPaths(1, 1);
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths);
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+    
+    // Drop partition
+    PathsUpdate update = new PathsUpdate(2, false);
+    TPathChanges pathChange = update.newPathChange("db1.tbl11");
+    pathChange.addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11/part111"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+
+    // Verify Paths deleted
+    assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+
+    // Verify rest ok
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part112"}));
+  }
+
+  private HMSPaths createBaseHMSPaths(int dbNum, int tblNum) {
+    String db = "db" + dbNum;
+    String tbl = "tbl" + dbNum + "" + tblNum;
+    String fullTbl = db + "." + tbl;
+    String dbPath = "/" + db;
+    String tblPath = "/" + db + "/" + tbl;
+    String partPath = tblPath + "/part" + dbNum + "" + tblNum;
+    HMSPaths hmsPaths = new HMSPaths(new String[] {"/"});
+    hmsPaths._addAuthzObject(db, Lists.newArrayList(dbPath));
+    hmsPaths._addAuthzObject(fullTbl, Lists.newArrayList(tblPath));
+    hmsPaths._addPathsToAuthzObject(fullTbl, Lists.newArrayList(
+        partPath + "1", partPath + "2" ));
+    return hmsPaths;
+  }
+
+}
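
A detail worth calling out from testPartialUpdateAddPath: a rename is not a dedicated operation but a single PathsUpdate carrying two TPathChanges, an add for the new location and a delete for the old one. The snippet below repeats the test's rename step in isolation:

  PathsUpdate update = new PathsUpdate(4, false);  // seqNum 4, not a full image
  update.newPathChange("db1.xtbl11")
      .addToAddPaths(PathsUpdate.cleanPath("file:///db1/xtbl11"));
  update.newPathChange("db1.tbl11")
      .addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11"));
  authzPaths.updatePartial(Lists.newArrayList(update), lock);

As the assertions note, partitions registered under the old table path keep resolving to the renamed object until their locations are explicitly updated.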

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml b/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
new file mode 100644
index 0000000..c23a431
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <!-- dummy file that gets rewritten by testcases in target test classpath -->
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-dist/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-dist/pom.xml b/sentry-hdfs/sentry-hdfs-dist/pom.xml
new file mode 100644
index 0000000..4bbb212
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-dist/pom.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-dist</artifactId>
+  <name>Sentry HDFS Dist</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-provider-db</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>2.1</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <finalName>sentry-hdfs-${project.version}</finalName>
+              <artifactSet>
+                <includes>
+                  <include>org.apache.sentry:sentry-hdfs-common</include>
+                  <include>org.apache.sentry:sentry-hdfs-namenode-plugin</include>
+                  <include>org.apache.sentry:sentry-provider-db</include>
+                </includes>
+              </artifactSet>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore
new file mode 100644
index 0000000..91ad75b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore
@@ -0,0 +1,18 @@
+*.class
+target/
+.classpath
+.project
+.settings
+.metadata
+.idea/
+*.iml
+derby.log
+datanucleus.log
+sentry-core/sentry-core-common/src/gen
+**/TempStatsStore/
+# Package Files #
+*.jar
+*.war
+*.ear
+test-output/
+maven-repo/

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml
new file mode 100644
index 0000000..813c2e4
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+  <name>Sentry HDFS Namenode Plugin</name>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+      <version>1.5.0-incubating-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
new file mode 100644
index 0000000..cf33b8b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+public class SentryAuthorizationConstants {
+
+  public static final String CONFIG_FILE = "hdfs-sentry.xml";
+
+  public static final String CONFIG_PREFIX = "sentry.authorization-provider.";
+
+  public static final String HDFS_USER_KEY = CONFIG_PREFIX + "hdfs-user";
+  public static final String HDFS_USER_DEFAULT = "hive";
+
+  public static final String HDFS_GROUP_KEY = CONFIG_PREFIX + "hdfs-group";
+  public static final String HDFS_GROUP_DEFAULT = "hive";
+
+  public static final String HDFS_PERMISSION_KEY = CONFIG_PREFIX + 
+      "hdfs-permission";
+  public static final long HDFS_PERMISSION_DEFAULT = 0770;
+
+  public static final String HDFS_PATH_PREFIXES_KEY = CONFIG_PREFIX + 
+      "hdfs-path-prefixes";
+  public static final String[] HDFS_PATH_PREFIXES_DEFAULT = new String[0];
+
+  public static final String CACHE_REFRESH_INTERVAL_KEY = CONFIG_PREFIX + 
+      "cache-refresh-interval.ms";
+  public static final int CACHE_REFRESH_INTERVAL_DEFAULT = 500;
+
+  public static final String CACHE_STALE_THRESHOLD_KEY = CONFIG_PREFIX + 
+      "cache-stale-threshold.ms";
+  public static final int CACHE_STALE_THRESHOLD_DEFAULT = 60 * 1000;
+
+  public static final String CACHE_REFRESH_RETRY_WAIT_KEY = CONFIG_PREFIX +
+      "cache-refresh-retry-wait.ms";
+  public static final int CACHE_REFRESH_RETRY_WAIT_DEFAULT = 30 * 1000;
+
+  public static final String INCLUDE_HDFS_AUTHZ_AS_ACL_KEY = CONFIG_PREFIX + 
+      "include-hdfs-authz-as-acl";
+  public static final boolean INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT = false;
+}
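
Putting the constants above together, everything the NameNode plugin reads from hdfs-sentry.xml hangs off the sentry.authorization-provider. prefix. A hypothetical configuration that enforces Sentry permissions under the default warehouse path might look like the following (the values are illustrative, not shipped defaults except where noted):

  <configuration>
    <property>
      <name>sentry.authorization-provider.hdfs-path-prefixes</name>
      <value>/user/hive/warehouse</value>
    </property>
    <property>
      <!-- same as CACHE_REFRESH_INTERVAL_DEFAULT -->
      <name>sentry.authorization-provider.cache-refresh-interval.ms</name>
      <value>500</value>
    </property>
    <property>
      <name>sentry.authorization-provider.include-hdfs-authz-as-acl</name>
      <value>true</value>
    </property>
  </configuration>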

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
new file mode 100644
index 0000000..3081ae1
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;
+import org.apache.sentry.hdfs.Updateable.Update;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class SentryAuthorizationInfo implements Runnable {
+  private static Logger LOG =
+      LoggerFactory.getLogger(SentryAuthorizationInfo.class);
+
+  private SentryUpdater updater;
+  private volatile UpdateableAuthzPaths authzPaths;
+  private volatile UpdateableAuthzPermissions authzPermissions;
+
+  private int refreshIntervalMillisec;
+  private int staleThresholdMillisec;
+  private int retryWaitMillisec;
+  private ScheduledExecutorService executor;
+  private volatile long lastUpdate;
+  private volatile long waitUntil;
+  private volatile long lastStaleReport;
+  // We don't need a re-entrant lock, but we do need a ReadWriteLock;
+  // unfortunately, ReentrantReadWriteLock is the only available
+  // concrete implementation of a ReadWriteLock.
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  @VisibleForTesting
+  SentryAuthorizationInfo() {}
+
+  public SentryAuthorizationInfo(Configuration conf) throws Exception {
+    String[] pathPrefixes = conf.getTrimmedStrings(
+        SentryAuthorizationConstants.HDFS_PATH_PREFIXES_KEY, 
+        SentryAuthorizationConstants.HDFS_PATH_PREFIXES_DEFAULT);
+    if (pathPrefixes.length == 0) {
+      LOG.warn("There are not HDFS path prefixes configured in [{}], "
+          + "Sentry authorization won't be enforced on any HDFS location",
+          SentryAuthorizationConstants.HDFS_PATH_PREFIXES_KEY);
+    } else {
+      refreshIntervalMillisec = conf.getInt(
+          SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_KEY,
+          SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_DEFAULT);
+      staleThresholdMillisec = conf.getInt(
+          SentryAuthorizationConstants.CACHE_STALE_THRESHOLD_KEY,
+          SentryAuthorizationConstants.CACHE_STALE_THRESHOLD_DEFAULT);
+      retryWaitMillisec = conf.getInt(
+          SentryAuthorizationConstants.CACHE_REFRESH_RETRY_WAIT_KEY,
+          SentryAuthorizationConstants.CACHE_REFRESH_RETRY_WAIT_DEFAULT);
+
+      LOG.debug("Sentry authorization will enforced in the following HDFS " +
+          "locations: [{}]", StringUtils.arrayToString(pathPrefixes));
+      LOG.debug("Refresh interval [{}]ms, retry wait [{}], stale threshold " +
+              "[{}]ms", new Object[] 
+          {refreshIntervalMillisec, retryWaitMillisec, staleThresholdMillisec});
+
+      authzPaths = new UpdateableAuthzPaths(pathPrefixes);
+      authzPermissions = new UpdateableAuthzPermissions();
+      waitUntil = System.currentTimeMillis();
+      lastStaleReport = 0;
+      updater = new SentryUpdater(conf, this);
+    }
+  }
+
+  UpdateableAuthzPaths getAuthzPaths() {
+    return authzPaths;
+  }
+
+  UpdateableAuthzPermissions getAuthzPermissions() {
+    return authzPermissions;
+  }
+
+  private void update() {
+    SentryAuthzUpdate updates = updater.getUpdates();
+    UpdateableAuthzPaths newAuthzPaths = processUpdates(
+        updates.getPathUpdates(), authzPaths);
+    UpdateableAuthzPermissions newAuthzPerms = processUpdates(
+        updates.getPermUpdates(), authzPermissions);
+    // If there were any FULL updates the returned instance would be
+    // different
+    if (newAuthzPaths != authzPaths || newAuthzPerms != authzPermissions) {
+      lock.writeLock().lock();
+      try {
+        authzPaths = newAuthzPaths;
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("FULL Updated paths seq Num [" + authzPaths.getLastUpdatedSeqNum() + "]");
+        }
+        authzPermissions = newAuthzPerms;
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("FULL Updated perms seq Num [" + authzPermissions.getLastUpdatedSeqNum() + "]");
+        }
+      } finally {
+        lock.writeLock().unlock();
+      }
+    }
+
+  }
+
+  private <K extends Update, V extends Updateable<K>> V processUpdates(List<K> updates,
+      V updateable) {
+    // In a list of updates, if there is a full update it will be the first
+    // one in the list; all the remaining ones will be partial updates
+    if (updates.size() > 0) {
+      if (updates.get(0).hasFullImage()) {
+        updateable = (V)updateable.updateFull(updates.remove(0));
+      }
+      // Any more elements ?
+      if (!updates.isEmpty()) {
+        updateable.updatePartial(updates, lock);
+      }
+    }
+    return updateable;
+  }
+
+  public void run() {
+    try {
+      // In case of a previous update failure, we sleep for the retry wait
+      // interval. We can do this because we are using a single-threaded
+      // executor and scheduling the runs with a fixed delay.
+      long currTime = System.currentTimeMillis();
+      if (waitUntil > currTime) {
+        Thread.sleep(waitUntil - currTime);
+      }
+      update();
+      // we reset lastUpdate only on successful pulling
+      lastUpdate = System.currentTimeMillis();
+      waitUntil = lastUpdate;
+    } catch (Exception ex) {
+      LOG.warn("Failed to update, will retry in [{}]ms, error: ", 
+          new Object[]{ retryWaitMillisec, ex.getMessage(), ex});
+      waitUntil = System.currentTimeMillis() + retryWaitMillisec;
+    }
+  }
+
+  public void start() {
+    if (authzPaths != null) {
+      try {
+        update();
+      } catch (Exception ex) {
+        LOG.warn("Failed to do initial update, will retry in [{}]ms, error: ",
+            new Object[]{retryWaitMillisec, ex.getMessage(), ex});
+        waitUntil = System.currentTimeMillis() + retryWaitMillisec;
+      }
+      executor = Executors.newSingleThreadScheduledExecutor(
+          new ThreadFactory() {
+            @Override
+            public Thread newThread(Runnable r) {
+              Thread t = new Thread(r, SentryAuthorizationInfo.class.getName() +
+                  "-refresher");
+              t.setDaemon(true);
+              return t;
+            }
+          }
+      );
+      executor.scheduleWithFixedDelay(this, refreshIntervalMillisec, 
+          refreshIntervalMillisec, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  public void stop() {
+    if (authzPaths != null) {
+      executor.shutdownNow();
+    }
+  }
+
+  public boolean isStale() {
+    long now = System.currentTimeMillis();
+    boolean stale = now - lastUpdate > staleThresholdMillisec;
+    if (stale && now - lastStaleReport > retryWaitMillisec) {
+      LOG.warn("Authorization information has been stale for [{}]s", 
+          (now - lastUpdate) / 1000);
+      lastStaleReport = now;
+    }
+    return stale;
+  }
+
+  public boolean isManaged(String[] pathElements) {
+    lock.readLock().lock();
+    try {
+      return authzPaths.isUnderPrefix(pathElements);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  public boolean doesBelongToAuthzObject(String[] pathElements) {
+    lock.readLock().lock();
+    try {
+      return authzPaths.findAuthzObject(pathElements) != null;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  public List<AclEntry> getAclEntries(String[] pathElements) {
+    lock.readLock().lock();
+    try {
+      String authzObj = authzPaths.findAuthzObject(pathElements);
+      return (authzObj != null) ? authzPermissions.getAcls(authzObj) 
+          : Collections.EMPTY_LIST;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+}
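
The ordering contract in processUpdates() is easy to miss in the generics: a batch can contain at most one full image, it is always the first element, updateFull() returns a replacement instance (the old one is discarded), and any remaining partial updates are applied to that replacement in order. Restated without generics, as a condensed sketch of the same logic rather than new behavior:

  List<PathsUpdate> batch = updates.getPathUpdates();
  if (!batch.isEmpty() && batch.get(0).hasFullImage()) {
    authzPaths = authzPaths.updateFull(batch.remove(0));  // wholesale replacement
  }
  if (!batch.isEmpty()) {
    authzPaths.updatePartial(batch, lock);                // in-place, under the lock
  }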

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
new file mode 100644
index 0000000..7d2940c
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
@@ -0,0 +1,372 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.AclFeature;
+import org.apache.hadoop.hdfs.server.namenode.AuthorizationProvider;
+import org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider;
+import org.apache.hadoop.security.AccessControlException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+
+public class SentryAuthorizationProvider 
+    extends AuthorizationProvider implements Configurable {
+  
+  static class SentryAclFeature extends AclFeature {
+    public SentryAclFeature(ImmutableList<AclEntry> entries) {
+      super(entries);
+    }
+  }
+
+  private static Logger LOG = 
+      LoggerFactory.getLogger(SentryAuthorizationProvider.class);
+
+  private boolean started;
+  private Configuration conf;
+  private AuthorizationProvider defaultAuthzProvider;
+  private String user;
+  private String group;
+  private FsPermission permission;
+  private boolean originalAuthzAsAcl;
+  private SentryAuthorizationInfo authzInfo;
+
+  public SentryAuthorizationProvider() {
+    this(null);
+  }
+
+  @VisibleForTesting
+  SentryAuthorizationProvider(SentryAuthorizationInfo authzInfo) {
+    this.authzInfo = authzInfo;
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public synchronized void start() {
+    if (started) {
+      throw new IllegalStateException("Provider already started");
+    }
+    started = true;
+    try {
+      if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false)) {
+        throw new RuntimeException("HDFS ACLs must be enabled");
+      }
+
+      defaultAuthzProvider = new DefaultAuthorizationProvider();
+      defaultAuthzProvider.start();
+      // Configuration is read from hdfs-sentry.xml and NN configuration, in
+      // that order of precedence.
+      Configuration conf = new Configuration(this.conf);
+      conf.addResource(SentryAuthorizationConstants.CONFIG_FILE);
+      user = conf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
+          SentryAuthorizationConstants.HDFS_USER_DEFAULT);
+      group = conf.get(SentryAuthorizationConstants.HDFS_GROUP_KEY,
+          SentryAuthorizationConstants.HDFS_GROUP_DEFAULT);
+      permission = FsPermission.createImmutable(
+          (short) conf.getLong(SentryAuthorizationConstants.HDFS_PERMISSION_KEY,
+              SentryAuthorizationConstants.HDFS_PERMISSION_DEFAULT)
+      );
+      originalAuthzAsAcl = conf.getBoolean(
+          SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_KEY,
+          SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT);
+
+      LOG.info("Starting");
+      LOG.info("Config: hdfs-user[{}] hdfs-group[{}] hdfs-permission[{}] " +
+          "include-hdfs-authz-as-acl[{}]", new Object[]
+          {user, group, permission, originalAuthzAsAcl});
+
+      if (authzInfo == null) {
+        authzInfo = new SentryAuthorizationInfo(conf);
+      }
+      authzInfo.start();
+    } catch (Exception ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public synchronized void stop() {
+    LOG.debug("Stopping");
+    authzInfo.stop();
+    defaultAuthzProvider.stop();
+    defaultAuthzProvider = null;
+  }
+
+  @Override
+  public void setSnaphottableDirs(Map<INodeAuthorizationInfo, Integer>
+      snapshotableDirs) {
+    defaultAuthzProvider.setSnaphottableDirs(snapshotableDirs);
+  }
+
+  @Override
+  public void addSnapshottable(INodeAuthorizationInfo dir) {
+    defaultAuthzProvider.addSnapshottable(dir);
+  }
+
+  @Override
+  public void removeSnapshottable(INodeAuthorizationInfo dir) {
+    defaultAuthzProvider.removeSnapshottable(dir);
+  }
+
+  @Override
+  public void createSnapshot(INodeAuthorizationInfo dir, int snapshotId)
+      throws IOException{
+    defaultAuthzProvider.createSnapshot(dir, snapshotId);
+  }
+
+  @Override
+  public void removeSnapshot(INodeAuthorizationInfo dir, int snapshotId)
+      throws IOException {
+    defaultAuthzProvider.removeSnapshot(dir, snapshotId);
+  }
+
+  @Override
+  public void checkPermission(String user, Set<String> groups,
+      INodeAuthorizationInfo[] inodes, int snapshotId,
+      boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
+      FsAction access, FsAction subAccess, boolean ignoreEmptyDir)
+      throws AccessControlException, UnresolvedLinkException {
+    defaultAuthzProvider.checkPermission(user, groups, inodes, snapshotId,
+        doCheckOwner, ancestorAccess, parentAccess, access, subAccess,
+        ignoreEmptyDir);
+  }
+
+  private static final String[] EMPTY_STRING_ARRAY = new String[0];
+  
+  private String[] getPathElements(INodeAuthorizationInfo node) {
+    return getPathElements(node, 0);
+  }
+
+  private String[] getPathElements(INodeAuthorizationInfo node, int idx) {
+    String[] paths;
+    INodeAuthorizationInfo parent = node.getParent();
+    if (parent == null) {
+      paths = (idx > 0) ? new String[idx] : EMPTY_STRING_ARRAY;
+    } else {
+      paths = getPathElements(parent, idx + 1);
+      paths[paths.length - 1 - idx] = node.getLocalName();
+    }
+    return paths;
+  }
+
+  @Override
+  public void setUser(INodeAuthorizationInfo node, String user) {
+    defaultAuthzProvider.setUser(node, user);
+  }
+
+  @Override
+  public String getUser(INodeAuthorizationInfo node, int snapshotId) {
+    String user;
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      user = defaultAuthzProvider.getUser(node, snapshotId);
+    } else {
+      if (!authzInfo.isStale()) {
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          user = this.user;
+        } else {
+          user = defaultAuthzProvider.getUser(node, snapshotId);
+        }
+      } else {
+        user = this.user;
+      }
+    }
+    return user;
+  }
+
+  @Override
+  public void setGroup(INodeAuthorizationInfo node, String group) {
+    defaultAuthzProvider.setGroup(node, group);
+  }
+
+  @Override
+  public String getGroup(INodeAuthorizationInfo node, int snapshotId) {
+    String group;
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      group = defaultAuthzProvider.getGroup(node, snapshotId);
+    } else {
+      if (!authzInfo.isStale()) {
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          group = this.group;
+        } else {
+          group = defaultAuthzProvider.getGroup(node, snapshotId);
+        }
+      } else {
+        group = this.group;
+      }
+    }
+    return group;
+  }
+
+  @Override
+  public void setPermission(INodeAuthorizationInfo node,
+      FsPermission permission) {
+    defaultAuthzProvider.setPermission(node, permission);
+  }
+
+  @Override
+  public FsPermission getFsPermission(
+      INodeAuthorizationInfo node, int snapshotId) {
+    FsPermission permission;
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      permission = defaultAuthzProvider.getFsPermission(node, snapshotId);
+    } else {
+      if (!authzInfo.isStale()) {
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          permission = this.permission;
+        } else {
+          permission = defaultAuthzProvider.getFsPermission(node, snapshotId);
+        }
+      } else {
+        permission = this.permission;
+      }
+    }
+    return permission;
+  }
+
+  private List<AclEntry> createAclEntries(String user, String group,
+      FsPermission permission) {
+    List<AclEntry> list = new ArrayList<AclEntry>();
+    AclEntry.Builder builder = new AclEntry.Builder();
+    FsPermission fsPerm = new FsPermission(permission);
+    builder.setName(user);
+    builder.setType(AclEntryType.USER);
+    builder.setScope(AclEntryScope.ACCESS);
+    builder.setPermission(fsPerm.getUserAction());
+    list.add(builder.build());
+    builder.setName(group);
+    builder.setType(AclEntryType.GROUP);
+    builder.setScope(AclEntryScope.ACCESS);
+    builder.setPermission(fsPerm.getGroupAction());
+    list.add(builder.build());
+    builder.setName(null);
+    builder.setType(AclEntryType.OTHER);
+    builder.setScope(AclEntryScope.ACCESS);
+    builder.setPermission(fsPerm.getOtherAction());
+    list.add(builder.build());
+    return list;
+  }
+
+  @Override
+  public AclFeature getAclFeature(INodeAuthorizationInfo node, int snapshotId) {
+    AclFeature f = null;
+    String[] pathElements = getPathElements(node);
+    String p = Arrays.toString(pathElements);
+    boolean isManaged = false;
+    boolean isStale = false;
+    boolean hasAuthzObj = false;
+    if (!authzInfo.isManaged(pathElements)) {
+      isManaged = false;
+      f = defaultAuthzProvider.getAclFeature(node, snapshotId);
+    } else {
+      isManaged = true;
+      List<AclEntry> list = new ArrayList<AclEntry>();
+      if (originalAuthzAsAcl) {
+        String user = defaultAuthzProvider.getUser(node, snapshotId);
+        String group = defaultAuthzProvider.getGroup(node, snapshotId);
+        INodeAuthorizationInfo pNode = node.getParent();
+        while  (group == null && pNode != null) {
+          group = defaultAuthzProvider.getGroup(pNode, snapshotId);
+          pNode = pNode.getParent();
+        }
+        FsPermission perm = defaultAuthzProvider.getFsPermission(node, snapshotId);
+        list.addAll(createAclEntries(user, group, perm));
+      } else {
+        list.addAll(createAclEntries(this.user, this.group, this.permission));
+      }
+      if (!authzInfo.isStale()) { 
+        isStale = false;
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          hasAuthzObj = true;
+          list.addAll(authzInfo.getAclEntries(pathElements));
+          f = new SentryAclFeature(ImmutableList.copyOf(list));
+        } else {
+          hasAuthzObj = false;
+          f = defaultAuthzProvider.getAclFeature(node, snapshotId);
+        }
+      } else {
+        isStale = true;
+        f = new SentryAclFeature(ImmutableList.copyOf(list));
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("### getAclEntry [" + (p == null ? "null" : p) + "] : ["
+          + "isManaged=" + isManaged
+          + ", isStale=" + isStale
+          + ", hasAuthzObj=" + hasAuthzObj
+          + ", origAuthzAsAcl=" + originalAuthzAsAcl + "]"
+          + "[" + (f == null ? "null" : f.getEntries()) + "]");
+    }
+    return f;
+  }
+
+  @Override
+  public void removeAclFeature(INodeAuthorizationInfo node) {
+    AclFeature aclFeature = node.getAclFeature(CURRENT_STATE_ID);
+    if (aclFeature.getClass() != SentryAclFeature.class) {
+      defaultAuthzProvider.removeAclFeature(node);
+    }
+  }
+
+  @Override
+  public void addAclFeature(INodeAuthorizationInfo node, AclFeature f) {
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      defaultAuthzProvider.addAclFeature(node, f);
+    }
+  }
+
+//  @Override 
+//  public boolean doesAllowChanges(INodeAuthorizationInfo node) {
+//    String[] pathElements = getPathElements(node);
+//    if (!authzInfo.isManaged(pathElements)) {
+//      return defaultAuthzProvider.doesAllowChanges(node);
+//    }
+//    return !authzInfo.doesBelongToAuthzObject(getPathElements(node));
+//  }
+
+}
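
Wiring this provider into a NameNode takes two pieces of configuration: HDFS ACLs must be on (start() refuses to run otherwise), and the NameNode must be pointed at this class. A sketch follows; the provider-class key name is an assumption based on the pluggable AuthorizationProvider API this code builds against, so verify it against the matching HDFS build:

  <configuration>
    <property>
      <name>dfs.namenode.acls.enabled</name>
      <value>true</value>
    </property>
    <property>
      <!-- assumed key name for the pluggable provider -->
      <name>dfs.namenode.authorization.provider.class</name>
      <value>org.apache.sentry.hdfs.SentryAuthorizationProvider</value>
    </property>
  </configuration>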


[6/9] incubator-sentry git commit: SENTRY-432: Synchronization of HDFS permissions to Sentry permissions (Arun Suresh via Lenni Kuff)

Posted by ls...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java
new file mode 100644
index 0000000..200ecad
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsDump.java
@@ -0,0 +1,549 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TPathsDump implements org.apache.thrift.TBase<TPathsDump, TPathsDump._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPathsDump");
+
+  private static final org.apache.thrift.protocol.TField ROOT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("rootId", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField NODE_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("nodeMap", org.apache.thrift.protocol.TType.MAP, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TPathsDumpStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TPathsDumpTupleSchemeFactory());
+  }
+
+  private int rootId; // required
+  private Map<Integer,TPathEntry> nodeMap; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ROOT_ID((short)1, "rootId"),
+    NODE_MAP((short)2, "nodeMap");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ROOT_ID
+          return ROOT_ID;
+        case 2: // NODE_MAP
+          return NODE_MAP;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ROOTID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ROOT_ID, new org.apache.thrift.meta_data.FieldMetaData("rootId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.NODE_MAP, new org.apache.thrift.meta_data.FieldMetaData("nodeMap", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPathEntry.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPathsDump.class, metaDataMap);
+  }
+
+  public TPathsDump() {
+  }
+
+  public TPathsDump(
+    int rootId,
+    Map<Integer,TPathEntry> nodeMap)
+  {
+    this();
+    this.rootId = rootId;
+    setRootIdIsSet(true);
+    this.nodeMap = nodeMap;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TPathsDump(TPathsDump other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.rootId = other.rootId;
+    if (other.isSetNodeMap()) {
+      Map<Integer,TPathEntry> __this__nodeMap = new HashMap<Integer,TPathEntry>();
+      for (Map.Entry<Integer, TPathEntry> other_element : other.nodeMap.entrySet()) {
+
+        Integer other_element_key = other_element.getKey();
+        TPathEntry other_element_value = other_element.getValue();
+
+        Integer __this__nodeMap_copy_key = other_element_key;
+
+        TPathEntry __this__nodeMap_copy_value = new TPathEntry(other_element_value);
+
+        __this__nodeMap.put(__this__nodeMap_copy_key, __this__nodeMap_copy_value);
+      }
+      this.nodeMap = __this__nodeMap;
+    }
+  }
+
+  public TPathsDump deepCopy() {
+    return new TPathsDump(this);
+  }
+
+  @Override
+  public void clear() {
+    setRootIdIsSet(false);
+    this.rootId = 0;
+    this.nodeMap = null;
+  }
+
+  public int getRootId() {
+    return this.rootId;
+  }
+
+  public void setRootId(int rootId) {
+    this.rootId = rootId;
+    setRootIdIsSet(true);
+  }
+
+  public void unsetRootId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ROOTID_ISSET_ID);
+  }
+
+  /** Returns true if field rootId is set (has been assigned a value) and false otherwise */
+  public boolean isSetRootId() {
+    return EncodingUtils.testBit(__isset_bitfield, __ROOTID_ISSET_ID);
+  }
+
+  public void setRootIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ROOTID_ISSET_ID, value);
+  }
+
+  public int getNodeMapSize() {
+    return (this.nodeMap == null) ? 0 : this.nodeMap.size();
+  }
+
+  public void putToNodeMap(int key, TPathEntry val) {
+    if (this.nodeMap == null) {
+      this.nodeMap = new HashMap<Integer,TPathEntry>();
+    }
+    this.nodeMap.put(key, val);
+  }
+
+  public Map<Integer,TPathEntry> getNodeMap() {
+    return this.nodeMap;
+  }
+
+  public void setNodeMap(Map<Integer,TPathEntry> nodeMap) {
+    this.nodeMap = nodeMap;
+  }
+
+  public void unsetNodeMap() {
+    this.nodeMap = null;
+  }
+
+  /** Returns true if field nodeMap is set (has been assigned a value) and false otherwise */
+  public boolean isSetNodeMap() {
+    return this.nodeMap != null;
+  }
+
+  public void setNodeMapIsSet(boolean value) {
+    if (!value) {
+      this.nodeMap = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ROOT_ID:
+      if (value == null) {
+        unsetRootId();
+      } else {
+        setRootId((Integer)value);
+      }
+      break;
+
+    case NODE_MAP:
+      if (value == null) {
+        unsetNodeMap();
+      } else {
+        setNodeMap((Map<Integer,TPathEntry>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ROOT_ID:
+      return Integer.valueOf(getRootId());
+
+    case NODE_MAP:
+      return getNodeMap();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ROOT_ID:
+      return isSetRootId();
+    case NODE_MAP:
+      return isSetNodeMap();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TPathsDump)
+      return this.equals((TPathsDump)that);
+    return false;
+  }
+
+  public boolean equals(TPathsDump that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_rootId = true;
+    boolean that_present_rootId = true;
+    if (this_present_rootId || that_present_rootId) {
+      if (!(this_present_rootId && that_present_rootId))
+        return false;
+      if (this.rootId != that.rootId)
+        return false;
+    }
+
+    boolean this_present_nodeMap = true && this.isSetNodeMap();
+    boolean that_present_nodeMap = true && that.isSetNodeMap();
+    if (this_present_nodeMap || that_present_nodeMap) {
+      if (!(this_present_nodeMap && that_present_nodeMap))
+        return false;
+      if (!this.nodeMap.equals(that.nodeMap))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_rootId = true;
+    builder.append(present_rootId);
+    if (present_rootId)
+      builder.append(rootId);
+
+    boolean present_nodeMap = true && (isSetNodeMap());
+    builder.append(present_nodeMap);
+    if (present_nodeMap)
+      builder.append(nodeMap);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TPathsDump other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TPathsDump typedOther = (TPathsDump)other;
+
+    lastComparison = Boolean.valueOf(isSetRootId()).compareTo(typedOther.isSetRootId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRootId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rootId, typedOther.rootId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNodeMap()).compareTo(typedOther.isSetNodeMap());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNodeMap()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nodeMap, typedOther.nodeMap);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TPathsDump(");
+    boolean first = true;
+
+    sb.append("rootId:");
+    sb.append(this.rootId);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("nodeMap:");
+    if (this.nodeMap == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.nodeMap);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetRootId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'rootId' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNodeMap()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nodeMap' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TPathsDumpStandardSchemeFactory implements SchemeFactory {
+    public TPathsDumpStandardScheme getScheme() {
+      return new TPathsDumpStandardScheme();
+    }
+  }
+
+  private static class TPathsDumpStandardScheme extends StandardScheme<TPathsDump> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TPathsDump struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ROOT_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.rootId = iprot.readI32();
+              struct.setRootIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // NODE_MAP
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map40 = iprot.readMapBegin();
+                struct.nodeMap = new HashMap<Integer,TPathEntry>(2*_map40.size);
+                for (int _i41 = 0; _i41 < _map40.size; ++_i41)
+                {
+                  int _key42; // required
+                  TPathEntry _val43; // required
+                  _key42 = iprot.readI32();
+                  _val43 = new TPathEntry();
+                  _val43.read(iprot);
+                  struct.nodeMap.put(_key42, _val43);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setNodeMapIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TPathsDump struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(ROOT_ID_FIELD_DESC);
+      oprot.writeI32(struct.rootId);
+      oprot.writeFieldEnd();
+      if (struct.nodeMap != null) {
+        oprot.writeFieldBegin(NODE_MAP_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.nodeMap.size()));
+          for (Map.Entry<Integer, TPathEntry> _iter44 : struct.nodeMap.entrySet())
+          {
+            oprot.writeI32(_iter44.getKey());
+            _iter44.getValue().write(oprot);
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TPathsDumpTupleSchemeFactory implements SchemeFactory {
+    public TPathsDumpTupleScheme getScheme() {
+      return new TPathsDumpTupleScheme();
+    }
+  }
+
+  private static class TPathsDumpTupleScheme extends TupleScheme<TPathsDump> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TPathsDump struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI32(struct.rootId);
+      {
+        oprot.writeI32(struct.nodeMap.size());
+        for (Map.Entry<Integer, TPathEntry> _iter45 : struct.nodeMap.entrySet())
+        {
+          oprot.writeI32(_iter45.getKey());
+          _iter45.getValue().write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TPathsDump struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.rootId = iprot.readI32();
+      struct.setRootIdIsSet(true);
+      {
+        org.apache.thrift.protocol.TMap _map46 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.nodeMap = new HashMap<Integer,TPathEntry>(2*_map46.size);
+        for (int _i47 = 0; _i47 < _map46.size; ++_i47)
+        {
+          int _key48; // required
+          TPathEntry _val49; // required
+          _key48 = iprot.readI32();
+          _val49 = new TPathEntry();
+          _val49.read(iprot);
+          struct.nodeMap.put(_key48, _val49);
+        }
+      }
+      struct.setNodeMapIsSet(true);
+    }
+  }
+
+}
+
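
For reference, a minimal usage sketch of the generated TPathsDump struct above. This is illustrative only and not part of the commit: the class name TPathsDumpRoundTrip and the empty node map are assumptions, and it presumes libthrift 0.9.0 (the compiler version noted in the generated headers) on the classpath.

// Round-trips a TPathsDump through TCompactProtocol, the same protocol the
// generated writeObject()/readObject() methods use for Java serialization.
import java.util.HashMap;
import java.util.Map;
import org.apache.sentry.hdfs.service.thrift.TPathEntry;
import org.apache.sentry.hdfs.service.thrift.TPathsDump;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class TPathsDumpRoundTrip {
  public static void main(String[] args) throws Exception {
    // Both rootId and nodeMap are REQUIRED per the metaDataMap; an empty
    // (but non-null) map still counts as set, so validate() passes.
    Map<Integer, TPathEntry> nodes = new HashMap<Integer, TPathEntry>();
    TPathsDump dump = new TPathsDump(0, nodes);

    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(dump);
    TPathsDump copy = new TPathsDump();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

    // equals()/hashCode() are generated above, so the round-trip is checkable.
    System.out.println(copy.equals(dump));  // prints: true
  }
}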

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java
new file mode 100644
index 0000000..d0ee6b6
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPathsUpdate.java
@@ -0,0 +1,748 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TPathsUpdate implements org.apache.thrift.TBase<TPathsUpdate, TPathsUpdate._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPathsUpdate");
+
+  private static final org.apache.thrift.protocol.TField HAS_FULL_IMAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("hasFullImage", org.apache.thrift.protocol.TType.BOOL, (short)1);
+  private static final org.apache.thrift.protocol.TField PATHS_DUMP_FIELD_DESC = new org.apache.thrift.protocol.TField("pathsDump", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField SEQ_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("seqNum", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField PATH_CHANGES_FIELD_DESC = new org.apache.thrift.protocol.TField("pathChanges", org.apache.thrift.protocol.TType.LIST, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TPathsUpdateStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TPathsUpdateTupleSchemeFactory());
+  }
+
+  private boolean hasFullImage; // required
+  private TPathsDump pathsDump; // optional
+  private long seqNum; // required
+  private List<TPathChanges> pathChanges; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    HAS_FULL_IMAGE((short)1, "hasFullImage"),
+    PATHS_DUMP((short)2, "pathsDump"),
+    SEQ_NUM((short)3, "seqNum"),
+    PATH_CHANGES((short)4, "pathChanges");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // HAS_FULL_IMAGE
+          return HAS_FULL_IMAGE;
+        case 2: // PATHS_DUMP
+          return PATHS_DUMP;
+        case 3: // SEQ_NUM
+          return SEQ_NUM;
+        case 4: // PATH_CHANGES
+          return PATH_CHANGES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __HASFULLIMAGE_ISSET_ID = 0;
+  private static final int __SEQNUM_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private _Fields optionals[] = {_Fields.PATHS_DUMP};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.HAS_FULL_IMAGE, new org.apache.thrift.meta_data.FieldMetaData("hasFullImage", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.PATHS_DUMP, new org.apache.thrift.meta_data.FieldMetaData("pathsDump", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPathsDump.class)));
+    tmpMap.put(_Fields.SEQ_NUM, new org.apache.thrift.meta_data.FieldMetaData("seqNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.PATH_CHANGES, new org.apache.thrift.meta_data.FieldMetaData("pathChanges", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPathChanges.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPathsUpdate.class, metaDataMap);
+  }
+
+  public TPathsUpdate() {
+  }
+
+  public TPathsUpdate(
+    boolean hasFullImage,
+    long seqNum,
+    List<TPathChanges> pathChanges)
+  {
+    this();
+    this.hasFullImage = hasFullImage;
+    setHasFullImageIsSet(true);
+    this.seqNum = seqNum;
+    setSeqNumIsSet(true);
+    this.pathChanges = pathChanges;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TPathsUpdate(TPathsUpdate other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.hasFullImage = other.hasFullImage;
+    if (other.isSetPathsDump()) {
+      this.pathsDump = new TPathsDump(other.pathsDump);
+    }
+    this.seqNum = other.seqNum;
+    if (other.isSetPathChanges()) {
+      List<TPathChanges> __this__pathChanges = new ArrayList<TPathChanges>();
+      for (TPathChanges other_element : other.pathChanges) {
+        __this__pathChanges.add(new TPathChanges(other_element));
+      }
+      this.pathChanges = __this__pathChanges;
+    }
+  }
+
+  public TPathsUpdate deepCopy() {
+    return new TPathsUpdate(this);
+  }
+
+  @Override
+  public void clear() {
+    setHasFullImageIsSet(false);
+    this.hasFullImage = false;
+    this.pathsDump = null;
+    setSeqNumIsSet(false);
+    this.seqNum = 0;
+    this.pathChanges = null;
+  }
+
+  public boolean isHasFullImage() {
+    return this.hasFullImage;
+  }
+
+  public void setHasFullImage(boolean hasFullImage) {
+    this.hasFullImage = hasFullImage;
+    setHasFullImageIsSet(true);
+  }
+
+  public void unsetHasFullImage() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASFULLIMAGE_ISSET_ID);
+  }
+
+  /** Returns true if field hasFullImage is set (has been assigned a value) and false otherwise */
+  public boolean isSetHasFullImage() {
+    return EncodingUtils.testBit(__isset_bitfield, __HASFULLIMAGE_ISSET_ID);
+  }
+
+  public void setHasFullImageIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASFULLIMAGE_ISSET_ID, value);
+  }
+
+  public TPathsDump getPathsDump() {
+    return this.pathsDump;
+  }
+
+  public void setPathsDump(TPathsDump pathsDump) {
+    this.pathsDump = pathsDump;
+  }
+
+  public void unsetPathsDump() {
+    this.pathsDump = null;
+  }
+
+  /** Returns true if field pathsDump is set (has been assigned a value) and false otherwise */
+  public boolean isSetPathsDump() {
+    return this.pathsDump != null;
+  }
+
+  public void setPathsDumpIsSet(boolean value) {
+    if (!value) {
+      this.pathsDump = null;
+    }
+  }
+
+  public long getSeqNum() {
+    return this.seqNum;
+  }
+
+  public void setSeqNum(long seqNum) {
+    this.seqNum = seqNum;
+    setSeqNumIsSet(true);
+  }
+
+  public void unsetSeqNum() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SEQNUM_ISSET_ID);
+  }
+
+  /** Returns true if field seqNum is set (has been assigned a value) and false otherwise */
+  public boolean isSetSeqNum() {
+    return EncodingUtils.testBit(__isset_bitfield, __SEQNUM_ISSET_ID);
+  }
+
+  public void setSeqNumIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SEQNUM_ISSET_ID, value);
+  }
+
+  public int getPathChangesSize() {
+    return (this.pathChanges == null) ? 0 : this.pathChanges.size();
+  }
+
+  public java.util.Iterator<TPathChanges> getPathChangesIterator() {
+    return (this.pathChanges == null) ? null : this.pathChanges.iterator();
+  }
+
+  public void addToPathChanges(TPathChanges elem) {
+    if (this.pathChanges == null) {
+      this.pathChanges = new ArrayList<TPathChanges>();
+    }
+    this.pathChanges.add(elem);
+  }
+
+  public List<TPathChanges> getPathChanges() {
+    return this.pathChanges;
+  }
+
+  public void setPathChanges(List<TPathChanges> pathChanges) {
+    this.pathChanges = pathChanges;
+  }
+
+  public void unsetPathChanges() {
+    this.pathChanges = null;
+  }
+
+  /** Returns true if field pathChanges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPathChanges() {
+    return this.pathChanges != null;
+  }
+
+  public void setPathChangesIsSet(boolean value) {
+    if (!value) {
+      this.pathChanges = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case HAS_FULL_IMAGE:
+      if (value == null) {
+        unsetHasFullImage();
+      } else {
+        setHasFullImage((Boolean)value);
+      }
+      break;
+
+    case PATHS_DUMP:
+      if (value == null) {
+        unsetPathsDump();
+      } else {
+        setPathsDump((TPathsDump)value);
+      }
+      break;
+
+    case SEQ_NUM:
+      if (value == null) {
+        unsetSeqNum();
+      } else {
+        setSeqNum((Long)value);
+      }
+      break;
+
+    case PATH_CHANGES:
+      if (value == null) {
+        unsetPathChanges();
+      } else {
+        setPathChanges((List<TPathChanges>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case HAS_FULL_IMAGE:
+      return Boolean.valueOf(isHasFullImage());
+
+    case PATHS_DUMP:
+      return getPathsDump();
+
+    case SEQ_NUM:
+      return Long.valueOf(getSeqNum());
+
+    case PATH_CHANGES:
+      return getPathChanges();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case HAS_FULL_IMAGE:
+      return isSetHasFullImage();
+    case PATHS_DUMP:
+      return isSetPathsDump();
+    case SEQ_NUM:
+      return isSetSeqNum();
+    case PATH_CHANGES:
+      return isSetPathChanges();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TPathsUpdate)
+      return this.equals((TPathsUpdate)that);
+    return false;
+  }
+
+  public boolean equals(TPathsUpdate that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_hasFullImage = true;
+    boolean that_present_hasFullImage = true;
+    if (this_present_hasFullImage || that_present_hasFullImage) {
+      if (!(this_present_hasFullImage && that_present_hasFullImage))
+        return false;
+      if (this.hasFullImage != that.hasFullImage)
+        return false;
+    }
+
+    boolean this_present_pathsDump = true && this.isSetPathsDump();
+    boolean that_present_pathsDump = true && that.isSetPathsDump();
+    if (this_present_pathsDump || that_present_pathsDump) {
+      if (!(this_present_pathsDump && that_present_pathsDump))
+        return false;
+      if (!this.pathsDump.equals(that.pathsDump))
+        return false;
+    }
+
+    boolean this_present_seqNum = true;
+    boolean that_present_seqNum = true;
+    if (this_present_seqNum || that_present_seqNum) {
+      if (!(this_present_seqNum && that_present_seqNum))
+        return false;
+      if (this.seqNum != that.seqNum)
+        return false;
+    }
+
+    boolean this_present_pathChanges = true && this.isSetPathChanges();
+    boolean that_present_pathChanges = true && that.isSetPathChanges();
+    if (this_present_pathChanges || that_present_pathChanges) {
+      if (!(this_present_pathChanges && that_present_pathChanges))
+        return false;
+      if (!this.pathChanges.equals(that.pathChanges))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_hasFullImage = true;
+    builder.append(present_hasFullImage);
+    if (present_hasFullImage)
+      builder.append(hasFullImage);
+
+    boolean present_pathsDump = true && (isSetPathsDump());
+    builder.append(present_pathsDump);
+    if (present_pathsDump)
+      builder.append(pathsDump);
+
+    boolean present_seqNum = true;
+    builder.append(present_seqNum);
+    if (present_seqNum)
+      builder.append(seqNum);
+
+    boolean present_pathChanges = true && (isSetPathChanges());
+    builder.append(present_pathChanges);
+    if (present_pathChanges)
+      builder.append(pathChanges);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TPathsUpdate other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TPathsUpdate typedOther = (TPathsUpdate)other;
+
+    lastComparison = Boolean.valueOf(isSetHasFullImage()).compareTo(typedOther.isSetHasFullImage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHasFullImage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasFullImage, typedOther.hasFullImage);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPathsDump()).compareTo(typedOther.isSetPathsDump());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPathsDump()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pathsDump, typedOther.pathsDump);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSeqNum()).compareTo(typedOther.isSetSeqNum());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSeqNum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.seqNum, typedOther.seqNum);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPathChanges()).compareTo(typedOther.isSetPathChanges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPathChanges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pathChanges, typedOther.pathChanges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TPathsUpdate(");
+    boolean first = true;
+
+    sb.append("hasFullImage:");
+    sb.append(this.hasFullImage);
+    first = false;
+    if (isSetPathsDump()) {
+      if (!first) sb.append(", ");
+      sb.append("pathsDump:");
+      if (this.pathsDump == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.pathsDump);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("seqNum:");
+    sb.append(this.seqNum);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("pathChanges:");
+    if (this.pathChanges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.pathChanges);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetHasFullImage()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hasFullImage' is unset! Struct:" + toString());
+    }
+
+    if (!isSetSeqNum()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'seqNum' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPathChanges()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'pathChanges' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (pathsDump != null) {
+      pathsDump.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TPathsUpdateStandardSchemeFactory implements SchemeFactory {
+    public TPathsUpdateStandardScheme getScheme() {
+      return new TPathsUpdateStandardScheme();
+    }
+  }
+
+  private static class TPathsUpdateStandardScheme extends StandardScheme<TPathsUpdate> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TPathsUpdate struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // HAS_FULL_IMAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.hasFullImage = iprot.readBool();
+              struct.setHasFullImageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PATHS_DUMP
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.pathsDump = new TPathsDump();
+              struct.pathsDump.read(iprot);
+              struct.setPathsDumpIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // SEQ_NUM
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.seqNum = iprot.readI64();
+              struct.setSeqNumIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PATH_CHANGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list50 = iprot.readListBegin();
+                struct.pathChanges = new ArrayList<TPathChanges>(_list50.size);
+                for (int _i51 = 0; _i51 < _list50.size; ++_i51)
+                {
+                  TPathChanges _elem52; // required
+                  _elem52 = new TPathChanges();
+                  _elem52.read(iprot);
+                  struct.pathChanges.add(_elem52);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPathChangesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TPathsUpdate struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(HAS_FULL_IMAGE_FIELD_DESC);
+      oprot.writeBool(struct.hasFullImage);
+      oprot.writeFieldEnd();
+      if (struct.pathsDump != null) {
+        if (struct.isSetPathsDump()) {
+          oprot.writeFieldBegin(PATHS_DUMP_FIELD_DESC);
+          struct.pathsDump.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldBegin(SEQ_NUM_FIELD_DESC);
+      oprot.writeI64(struct.seqNum);
+      oprot.writeFieldEnd();
+      if (struct.pathChanges != null) {
+        oprot.writeFieldBegin(PATH_CHANGES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pathChanges.size()));
+          for (TPathChanges _iter53 : struct.pathChanges)
+          {
+            _iter53.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TPathsUpdateTupleSchemeFactory implements SchemeFactory {
+    public TPathsUpdateTupleScheme getScheme() {
+      return new TPathsUpdateTupleScheme();
+    }
+  }
+
+  private static class TPathsUpdateTupleScheme extends TupleScheme<TPathsUpdate> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TPathsUpdate struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBool(struct.hasFullImage);
+      oprot.writeI64(struct.seqNum);
+      {
+        oprot.writeI32(struct.pathChanges.size());
+        for (TPathChanges _iter54 : struct.pathChanges)
+        {
+          _iter54.write(oprot);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetPathsDump()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPathsDump()) {
+        struct.pathsDump.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TPathsUpdate struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.hasFullImage = iprot.readBool();
+      struct.setHasFullImageIsSet(true);
+      struct.seqNum = iprot.readI64();
+      struct.setSeqNumIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list55 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.pathChanges = new ArrayList<TPathChanges>(_list55.size);
+        for (int _i56 = 0; _i56 < _list55.size; ++_i56)
+        {
+          TPathChanges _elem57; // required
+          _elem57 = new TPathChanges();
+          _elem57.read(iprot);
+          struct.pathChanges.add(_elem57);
+        }
+      }
+      struct.setPathChangesIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.pathsDump = new TPathsDump();
+        struct.pathsDump.read(iprot);
+        struct.setPathsDumpIsSet(true);
+      }
+    }
+  }
+
+}
+
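
Similarly, a sketch of how a TPathsUpdate delta might be built (illustrative, not part of the commit; the class name TPathsUpdateSketch and the sequence number are assumptions, and TPathChanges construction is defined elsewhere in this patch). It highlights that hasFullImage, seqNum, and pathChanges are required, while pathsDump is optional and only written when set, as the standard-scheme write() above shows.

import java.util.ArrayList;
import java.util.List;
import org.apache.sentry.hdfs.service.thrift.TPathChanges;
import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;

public class TPathsUpdateSketch {
  public static void main(String[] args) throws Exception {
    // A delta update: no full image, hypothetical sequence number 42,
    // and an (empty) list of path changes.
    List<TPathChanges> changes = new ArrayList<TPathChanges>();
    TPathsUpdate update = new TPathsUpdate(false, 42L, changes);

    // pathsDump stays unset, so the standard scheme skips field 2 and
    // toString() omits it entirely.
    System.out.println(update.isSetPathsDump());  // prints: false
    update.validate();  // passes: the three required fields are all set
    System.out.println(update);
  }
}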

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java
new file mode 100644
index 0000000..850404b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/gen/thrift/gen-javabean/org/apache/sentry/hdfs/service/thrift/TPermissionsUpdate.java
@@ -0,0 +1,810 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.sentry.hdfs.service.thrift;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TPermissionsUpdate implements org.apache.thrift.TBase<TPermissionsUpdate, TPermissionsUpdate._Fields>, java.io.Serializable, Cloneable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPermissionsUpdate");
+
+  private static final org.apache.thrift.protocol.TField HASFULL_IMAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("hasfullImage", org.apache.thrift.protocol.TType.BOOL, (short)1);
+  private static final org.apache.thrift.protocol.TField SEQ_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("seqNum", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField PRIVILEGE_CHANGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privilegeChanges", org.apache.thrift.protocol.TType.MAP, (short)3);
+  private static final org.apache.thrift.protocol.TField ROLE_CHANGES_FIELD_DESC = new org.apache.thrift.protocol.TField("roleChanges", org.apache.thrift.protocol.TType.MAP, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TPermissionsUpdateStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TPermissionsUpdateTupleSchemeFactory());
+  }
+
+  private boolean hasfullImage; // required
+  private long seqNum; // required
+  private Map<String,TPrivilegeChanges> privilegeChanges; // required
+  private Map<String,TRoleChanges> roleChanges; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    HASFULL_IMAGE((short)1, "hasfullImage"),
+    SEQ_NUM((short)2, "seqNum"),
+    PRIVILEGE_CHANGES((short)3, "privilegeChanges"),
+    ROLE_CHANGES((short)4, "roleChanges");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // HASFULL_IMAGE
+          return HASFULL_IMAGE;
+        case 2: // SEQ_NUM
+          return SEQ_NUM;
+        case 3: // PRIVILEGE_CHANGES
+          return PRIVILEGE_CHANGES;
+        case 4: // ROLE_CHANGES
+          return ROLE_CHANGES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __HASFULLIMAGE_ISSET_ID = 0;
+  private static final int __SEQNUM_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.HASFULL_IMAGE, new org.apache.thrift.meta_data.FieldMetaData("hasfullImage", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.SEQ_NUM, new org.apache.thrift.meta_data.FieldMetaData("seqNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.PRIVILEGE_CHANGES, new org.apache.thrift.meta_data.FieldMetaData("privilegeChanges", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPrivilegeChanges.class))));
+    tmpMap.put(_Fields.ROLE_CHANGES, new org.apache.thrift.meta_data.FieldMetaData("roleChanges", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRoleChanges.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPermissionsUpdate.class, metaDataMap);
+  }
+
+  public TPermissionsUpdate() {
+  }
+
+  public TPermissionsUpdate(
+    boolean hasfullImage,
+    long seqNum,
+    Map<String,TPrivilegeChanges> privilegeChanges,
+    Map<String,TRoleChanges> roleChanges)
+  {
+    this();
+    this.hasfullImage = hasfullImage;
+    setHasfullImageIsSet(true);
+    this.seqNum = seqNum;
+    setSeqNumIsSet(true);
+    this.privilegeChanges = privilegeChanges;
+    this.roleChanges = roleChanges;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TPermissionsUpdate(TPermissionsUpdate other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.hasfullImage = other.hasfullImage;
+    this.seqNum = other.seqNum;
+    if (other.isSetPrivilegeChanges()) {
+      Map<String,TPrivilegeChanges> __this__privilegeChanges = new HashMap<String,TPrivilegeChanges>();
+      for (Map.Entry<String, TPrivilegeChanges> other_element : other.privilegeChanges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        TPrivilegeChanges other_element_value = other_element.getValue();
+
+        String __this__privilegeChanges_copy_key = other_element_key;
+
+        TPrivilegeChanges __this__privilegeChanges_copy_value = new TPrivilegeChanges(other_element_value);
+
+        __this__privilegeChanges.put(__this__privilegeChanges_copy_key, __this__privilegeChanges_copy_value);
+      }
+      this.privilegeChanges = __this__privilegeChanges;
+    }
+    if (other.isSetRoleChanges()) {
+      Map<String,TRoleChanges> __this__roleChanges = new HashMap<String,TRoleChanges>();
+      for (Map.Entry<String, TRoleChanges> other_element : other.roleChanges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        TRoleChanges other_element_value = other_element.getValue();
+
+        String __this__roleChanges_copy_key = other_element_key;
+
+        TRoleChanges __this__roleChanges_copy_value = new TRoleChanges(other_element_value);
+
+        __this__roleChanges.put(__this__roleChanges_copy_key, __this__roleChanges_copy_value);
+      }
+      this.roleChanges = __this__roleChanges;
+    }
+  }
+
+  public TPermissionsUpdate deepCopy() {
+    return new TPermissionsUpdate(this);
+  }
+
+  @Override
+  public void clear() {
+    setHasfullImageIsSet(false);
+    this.hasfullImage = false;
+    setSeqNumIsSet(false);
+    this.seqNum = 0;
+    this.privilegeChanges = null;
+    this.roleChanges = null;
+  }
+
+  public boolean isHasfullImage() {
+    return this.hasfullImage;
+  }
+
+  public void setHasfullImage(boolean hasfullImage) {
+    this.hasfullImage = hasfullImage;
+    setHasfullImageIsSet(true);
+  }
+
+  public void unsetHasfullImage() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASFULLIMAGE_ISSET_ID);
+  }
+
+  /** Returns true if field hasfullImage is set (has been assigned a value) and false otherwise */
+  public boolean isSetHasfullImage() {
+    return EncodingUtils.testBit(__isset_bitfield, __HASFULLIMAGE_ISSET_ID);
+  }
+
+  public void setHasfullImageIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASFULLIMAGE_ISSET_ID, value);
+  }
+
+  public long getSeqNum() {
+    return this.seqNum;
+  }
+
+  public void setSeqNum(long seqNum) {
+    this.seqNum = seqNum;
+    setSeqNumIsSet(true);
+  }
+
+  public void unsetSeqNum() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SEQNUM_ISSET_ID);
+  }
+
+  /** Returns true if field seqNum is set (has been assigned a value) and false otherwise */
+  public boolean isSetSeqNum() {
+    return EncodingUtils.testBit(__isset_bitfield, __SEQNUM_ISSET_ID);
+  }
+
+  public void setSeqNumIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SEQNUM_ISSET_ID, value);
+  }
+
+  public int getPrivilegeChangesSize() {
+    return (this.privilegeChanges == null) ? 0 : this.privilegeChanges.size();
+  }
+
+  public void putToPrivilegeChanges(String key, TPrivilegeChanges val) {
+    if (this.privilegeChanges == null) {
+      this.privilegeChanges = new HashMap<String,TPrivilegeChanges>();
+    }
+    this.privilegeChanges.put(key, val);
+  }
+
+  public Map<String,TPrivilegeChanges> getPrivilegeChanges() {
+    return this.privilegeChanges;
+  }
+
+  public void setPrivilegeChanges(Map<String,TPrivilegeChanges> privilegeChanges) {
+    this.privilegeChanges = privilegeChanges;
+  }
+
+  public void unsetPrivilegeChanges() {
+    this.privilegeChanges = null;
+  }
+
+  /** Returns true if field privilegeChanges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivilegeChanges() {
+    return this.privilegeChanges != null;
+  }
+
+  public void setPrivilegeChangesIsSet(boolean value) {
+    if (!value) {
+      this.privilegeChanges = null;
+    }
+  }
+
+  public int getRoleChangesSize() {
+    return (this.roleChanges == null) ? 0 : this.roleChanges.size();
+  }
+
+  public void putToRoleChanges(String key, TRoleChanges val) {
+    if (this.roleChanges == null) {
+      this.roleChanges = new HashMap<String,TRoleChanges>();
+    }
+    this.roleChanges.put(key, val);
+  }
+
+  public Map<String,TRoleChanges> getRoleChanges() {
+    return this.roleChanges;
+  }
+
+  public void setRoleChanges(Map<String,TRoleChanges> roleChanges) {
+    this.roleChanges = roleChanges;
+  }
+
+  public void unsetRoleChanges() {
+    this.roleChanges = null;
+  }
+
+  /** Returns true if field roleChanges is set (has been assigned a value) and false otherwise */
+  public boolean isSetRoleChanges() {
+    return this.roleChanges != null;
+  }
+
+  public void setRoleChangesIsSet(boolean value) {
+    if (!value) {
+      this.roleChanges = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case HASFULL_IMAGE:
+      if (value == null) {
+        unsetHasfullImage();
+      } else {
+        setHasfullImage((Boolean)value);
+      }
+      break;
+
+    case SEQ_NUM:
+      if (value == null) {
+        unsetSeqNum();
+      } else {
+        setSeqNum((Long)value);
+      }
+      break;
+
+    case PRIVILEGE_CHANGES:
+      if (value == null) {
+        unsetPrivilegeChanges();
+      } else {
+        setPrivilegeChanges((Map<String,TPrivilegeChanges>)value);
+      }
+      break;
+
+    case ROLE_CHANGES:
+      if (value == null) {
+        unsetRoleChanges();
+      } else {
+        setRoleChanges((Map<String,TRoleChanges>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case HASFULL_IMAGE:
+      return Boolean.valueOf(isHasfullImage());
+
+    case SEQ_NUM:
+      return Long.valueOf(getSeqNum());
+
+    case PRIVILEGE_CHANGES:
+      return getPrivilegeChanges();
+
+    case ROLE_CHANGES:
+      return getRoleChanges();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case HASFULL_IMAGE:
+      return isSetHasfullImage();
+    case SEQ_NUM:
+      return isSetSeqNum();
+    case PRIVILEGE_CHANGES:
+      return isSetPrivilegeChanges();
+    case ROLE_CHANGES:
+      return isSetRoleChanges();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TPermissionsUpdate)
+      return this.equals((TPermissionsUpdate)that);
+    return false;
+  }
+
+  public boolean equals(TPermissionsUpdate that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_hasfullImage = true;
+    boolean that_present_hasfullImage = true;
+    if (this_present_hasfullImage || that_present_hasfullImage) {
+      if (!(this_present_hasfullImage && that_present_hasfullImage))
+        return false;
+      if (this.hasfullImage != that.hasfullImage)
+        return false;
+    }
+
+    boolean this_present_seqNum = true;
+    boolean that_present_seqNum = true;
+    if (this_present_seqNum || that_present_seqNum) {
+      if (!(this_present_seqNum && that_present_seqNum))
+        return false;
+      if (this.seqNum != that.seqNum)
+        return false;
+    }
+
+    boolean this_present_privilegeChanges = true && this.isSetPrivilegeChanges();
+    boolean that_present_privilegeChanges = true && that.isSetPrivilegeChanges();
+    if (this_present_privilegeChanges || that_present_privilegeChanges) {
+      if (!(this_present_privilegeChanges && that_present_privilegeChanges))
+        return false;
+      if (!this.privilegeChanges.equals(that.privilegeChanges))
+        return false;
+    }
+
+    boolean this_present_roleChanges = true && this.isSetRoleChanges();
+    boolean that_present_roleChanges = true && that.isSetRoleChanges();
+    if (this_present_roleChanges || that_present_roleChanges) {
+      if (!(this_present_roleChanges && that_present_roleChanges))
+        return false;
+      if (!this.roleChanges.equals(that.roleChanges))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_hasfullImage = true;
+    builder.append(present_hasfullImage);
+    if (present_hasfullImage)
+      builder.append(hasfullImage);
+
+    boolean present_seqNum = true;
+    builder.append(present_seqNum);
+    if (present_seqNum)
+      builder.append(seqNum);
+
+    boolean present_privilegeChanges = true && (isSetPrivilegeChanges());
+    builder.append(present_privilegeChanges);
+    if (present_privilegeChanges)
+      builder.append(privilegeChanges);
+
+    boolean present_roleChanges = true && (isSetRoleChanges());
+    builder.append(present_roleChanges);
+    if (present_roleChanges)
+      builder.append(roleChanges);
+
+    return builder.toHashCode();
+  }
+
+  public int compareTo(TPermissionsUpdate other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+    TPermissionsUpdate typedOther = (TPermissionsUpdate)other;
+
+    lastComparison = Boolean.valueOf(isSetHasfullImage()).compareTo(typedOther.isSetHasfullImage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHasfullImage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasfullImage, typedOther.hasfullImage);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSeqNum()).compareTo(typedOther.isSetSeqNum());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSeqNum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.seqNum, typedOther.seqNum);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrivilegeChanges()).compareTo(typedOther.isSetPrivilegeChanges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivilegeChanges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privilegeChanges, typedOther.privilegeChanges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRoleChanges()).compareTo(typedOther.isSetRoleChanges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRoleChanges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.roleChanges, typedOther.roleChanges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
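+  // read()/write() delegate to whichever scheme matches the protocol:
+  // StandardScheme for self-describing protocols such as TBinaryProtocol,
+  // TupleScheme for the more compact TTupleProtocol.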
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TPermissionsUpdate(");
+    boolean first = true;
+
+    sb.append("hasfullImage:");
+    sb.append(this.hasfullImage);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("seqNum:");
+    sb.append(this.seqNum);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("privilegeChanges:");
+    if (this.privilegeChanges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.privilegeChanges);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("roleChanges:");
+    if (this.roleChanges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.roleChanges);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
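+  // validate() enforces the required fields; the standard scheme calls it
+  // after every read and before every write.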
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetHasfullImage()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hasfullImage' is unset! Struct:" + toString());
+    }
+
+    if (!isSetSeqNum()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'seqNum' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPrivilegeChanges()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'privilegeChanges' is unset! Struct:" + toString());
+    }
+
+    if (!isSetRoleChanges()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'roleChanges' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
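+  // Java serialization is bridged through Thrift's TCompactProtocol rather
+  // than duplicating field-by-field logic here.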
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not invoke the default constructor, so the
+      // isset bitfield must be reset explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
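+  // The standard scheme skips unknown or type-mismatched fields via
+  // TProtocolUtil.skip(), so older readers remain compatible with newer
+  // writers.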
+  private static class TPermissionsUpdateStandardSchemeFactory implements SchemeFactory {
+    public TPermissionsUpdateStandardScheme getScheme() {
+      return new TPermissionsUpdateStandardScheme();
+    }
+  }
+
+  private static class TPermissionsUpdateStandardScheme extends StandardScheme<TPermissionsUpdate> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TPermissionsUpdate struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // HASFULL_IMAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.hasfullImage = iprot.readBool();
+              struct.setHasfullImageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // SEQ_NUM
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.seqNum = iprot.readI64();
+              struct.setSeqNumIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PRIVILEGE_CHANGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map94 = iprot.readMapBegin();
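+                // capacity 2*size keeps the map below the default 0.75 load
+                // factor, so no rehash occurs while filling it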
+                struct.privilegeChanges = new HashMap<String,TPrivilegeChanges>(2*_map94.size);
+                for (int _i95 = 0; _i95 < _map94.size; ++_i95)
+                {
+                  String _key96; // required
+                  TPrivilegeChanges _val97; // required
+                  _key96 = iprot.readString();
+                  _val97 = new TPrivilegeChanges();
+                  _val97.read(iprot);
+                  struct.privilegeChanges.put(_key96, _val97);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setPrivilegeChangesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // ROLE_CHANGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map98 = iprot.readMapBegin();
+                struct.roleChanges = new HashMap<String,TRoleChanges>(2*_map98.size);
+                for (int _i99 = 0; _i99 < _map98.size; ++_i99)
+                {
+                  String _key100; // required
+                  TRoleChanges _val101; // required
+                  _key100 = iprot.readString();
+                  _val101 = new TRoleChanges();
+                  _val101.read(iprot);
+                  struct.roleChanges.put(_key100, _val101);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setRoleChangesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TPermissionsUpdate struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(HASFULL_IMAGE_FIELD_DESC);
+      oprot.writeBool(struct.hasfullImage);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(SEQ_NUM_FIELD_DESC);
+      oprot.writeI64(struct.seqNum);
+      oprot.writeFieldEnd();
+      if (struct.privilegeChanges != null) {
+        oprot.writeFieldBegin(PRIVILEGE_CHANGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.privilegeChanges.size()));
+          for (Map.Entry<String, TPrivilegeChanges> _iter102 : struct.privilegeChanges.entrySet())
+          {
+            oprot.writeString(_iter102.getKey());
+            _iter102.getValue().write(oprot);
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.roleChanges != null) {
+        oprot.writeFieldBegin(ROLE_CHANGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.roleChanges.size()));
+          for (Map.Entry<String, TRoleChanges> _iter103 : struct.roleChanges.entrySet())
+          {
+            oprot.writeString(_iter103.getKey());
+            _iter103.getValue().write(oprot);
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
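+  // The tuple scheme writes no field headers or stop marker: since all four
+  // fields are required, reader and writer rely on the fixed field order and
+  // types instead.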
+  private static class TPermissionsUpdateTupleSchemeFactory implements SchemeFactory {
+    public TPermissionsUpdateTupleScheme getScheme() {
+      return new TPermissionsUpdateTupleScheme();
+    }
+  }
+
+  private static class TPermissionsUpdateTupleScheme extends TupleScheme<TPermissionsUpdate> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TPermissionsUpdate struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBool(struct.hasfullImage);
+      oprot.writeI64(struct.seqNum);
+      {
+        oprot.writeI32(struct.privilegeChanges.size());
+        for (Map.Entry<String, TPrivilegeChanges> _iter104 : struct.privilegeChanges.entrySet())
+        {
+          oprot.writeString(_iter104.getKey());
+          _iter104.getValue().write(oprot);
+        }
+      }
+      {
+        oprot.writeI32(struct.roleChanges.size());
+        for (Map.Entry<String, TRoleChanges> _iter105 : struct.roleChanges.entrySet())
+        {
+          oprot.writeString(_iter105.getKey());
+          _iter105.getValue().write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TPermissionsUpdate struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.hasfullImage = iprot.readBool();
+      struct.setHasfullImageIsSet(true);
+      struct.seqNum = iprot.readI64();
+      struct.setSeqNumIsSet(true);
+      {
+        org.apache.thrift.protocol.TMap _map106 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.privilegeChanges = new HashMap<String,TPrivilegeChanges>(2*_map106.size);
+        for (int _i107 = 0; _i107 < _map106.size; ++_i107)
+        {
+          String _key108; // required
+          TPrivilegeChanges _val109; // required
+          _key108 = iprot.readString();
+          _val109 = new TPrivilegeChanges();
+          _val109.read(iprot);
+          struct.privilegeChanges.put(_key108, _val109);
+        }
+      }
+      struct.setPrivilegeChangesIsSet(true);
+      {
+        org.apache.thrift.protocol.TMap _map110 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.roleChanges = new HashMap<String,TRoleChanges>(2*_map110.size);
+        for (int _i111 = 0; _i111 < _map110.size; ++_i111)
+        {
+          String _key112; // required
+          TRoleChanges _val113; // required
+          _key112 = iprot.readString();
+          _val113 = new TRoleChanges();
+          _val113.read(iprot);
+          struct.roleChanges.put(_key112, _val113);
+        }
+      }
+      struct.setRoleChangesIsSet(true);
+    }
+  }
+
+}
+